-rw-r--r--  .bzrignore | 1
-rwxr-xr-x  BUILD/SETUP.sh | 4
-rw-r--r--  CMakeLists.txt | 15
-rw-r--r--  VERSION | 10
-rw-r--r--  client/completion_hash.cc | 2
-rw-r--r--  client/mysql.cc | 2
-rw-r--r--  client/mysqladmin.cc | 55
-rw-r--r--  client/mysqlbinlog.cc | 34
-rw-r--r--  client/mysqlcheck.c | 6
-rw-r--r--  client/mysqldump.c | 8
-rw-r--r--  client/mysqltest.cc | 79
-rw-r--r--  cmake/build_configurations/mysql_release.cmake | 2
-rw-r--r--  cmake/cpack_rpm.cmake | 4
-rw-r--r--  cmake/maintainer.cmake | 2
-rw-r--r--  cmake/mysql_version.cmake | 4
-rw-r--r--  config.h.cmake | 1
-rw-r--r--  configure.cmake | 18
-rw-r--r--  extra/comp_err.c | 8
-rw-r--r--  extra/perror.c | 5
-rw-r--r--  extra/replace.c | 4
-rw-r--r--  extra/resolve_stack_dump.c | 2
-rw-r--r--  extra/yassl/CMakeLists.txt | 4
-rw-r--r--  extra/yassl/src/crypto_wrapper.cpp | 21
-rw-r--r--  extra/yassl/src/template_instnt.cpp | 110
-rw-r--r--  extra/yassl/src/yassl_int.cpp | 10
-rw-r--r--  extra/yassl/taocrypt/CMakeLists.txt | 4
-rw-r--r--  extra/yassl/taocrypt/src/algebra.cpp | 10
-rw-r--r--  extra/yassl/taocrypt/src/integer.cpp | 12
-rw-r--r--  extra/yassl/taocrypt/src/template_instnt.cpp | 81
-rw-r--r--  include/hash.h | 1
-rw-r--r--  include/heap.h | 1
-rw-r--r--  include/ma_dyncol.h | 102
-rw-r--r--  include/maria.h | 3
-rw-r--r--  include/my_base.h | 27
-rw-r--r--  include/my_dir.h | 5
-rw-r--r--  include/my_global.h | 2
-rw-r--r--  include/my_handler_errors.h (renamed from mysys/my_handler_errors.h) | 2
-rw-r--r--  include/my_sys.h | 48
-rw-r--r--  include/my_tree.h | 6
-rw-r--r--  include/myisamchk.h | 3
-rw-r--r--  include/mysql.h.pp | 4
-rw-r--r--  include/mysql/plugin.h | 2
-rw-r--r--  include/mysql/plugin_audit.h.pp | 2
-rw-r--r--  include/mysql/plugin_auth.h.pp | 2
-rw-r--r--  include/mysql/plugin_ftparser.h.pp | 2
-rw-r--r--  include/mysql_com.h | 8
-rw-r--r--  libmysql/CMakeLists.txt | 1
-rw-r--r--  libmysql/errmsg.c | 8
-rw-r--r--  libmysql/libmysql.c | 7
-rw-r--r--  libmysqld/CMakeLists.txt | 5
-rw-r--r--  libmysqld/emb_qcache.cc | 2
-rw-r--r--  libmysqld/lib_sql.cc | 12
-rw-r--r--  mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test | 4
-rw-r--r--  mysql-test/extra/rpl_tests/create_recursive_construct.inc | 17
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_deadlock.test | 2
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_insert_delayed.test | 4
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_log.test | 1
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_max_relay_size.test | 3
-rw-r--r--  mysql-test/extra/rpl_tests/rpl_show_relaylog_events.inc | 2
-rw-r--r--  mysql-test/include/alter_table_mdev539.inc | 65
-rw-r--r--  mysql-test/include/binlog_start_pos.inc | 8
-rw-r--r--  mysql-test/include/function_defaults.inc | 1166
-rw-r--r--  mysql-test/include/function_defaults_notembedded.inc | 94
-rw-r--r--  mysql-test/include/have_cassandra.inc | 10
-rw-r--r--  mysql-test/include/have_cassandra.opt | 1
-rw-r--r--  mysql-test/include/have_dbi_dbd-mysql.inc | 4
-rw-r--r--  mysql-test/include/have_debug_sync.inc | 9
-rw-r--r--  mysql-test/include/have_stat_tables.inc | 5
-rw-r--r--  mysql-test/include/have_stat_tables.opt | 1
-rw-r--r--  mysql-test/include/mtr_warnings.sql | 2
-rw-r--r--  mysql-test/include/not_openssl.inc | 4
-rw-r--r--  mysql-test/include/not_ssl.inc | 4
-rw-r--r--  mysql-test/include/setup_fake_relay_log.inc | 4
-rw-r--r--  mysql-test/include/show_binlog_events.inc | 2
-rw-r--r--  mysql-test/include/show_binlog_events2.inc | 2
-rw-r--r--  mysql-test/include/show_events.inc | 29
-rw-r--r--  mysql-test/include/system_db_struct.inc | 3
-rw-r--r--  mysql-test/include/wait_for_binlog_checkpoint.inc | 53
-rw-r--r--  mysql-test/include/world_schema_utf8.inc | 25
-rwxr-xr-x  mysql-test/mysql-test-run.pl | 7
-rw-r--r--  mysql-test/r/1st.result | 3
-rw-r--r--  mysql-test/r/alter_table.result | 6
-rw-r--r--  mysql-test/r/alter_table_mdev539_maria.result | 252
-rw-r--r--  mysql-test/r/alter_table_mdev539_myisam.result | 252
-rw-r--r--  mysql-test/r/cassandra.result | 593
-rw-r--r--  mysql-test/r/comments.result | 16
-rw-r--r--  mysql-test/r/connect.result | 9
-rw-r--r--  mysql-test/r/create.result | 12
-rw-r--r--  mysql-test/r/dyncol.result | 352
-rw-r--r--  mysql-test/r/filesort_debug.result | 2
-rw-r--r--  mysql-test/r/flush.result | 7
-rw-r--r--  mysql-test/r/func_system.result | 12
-rw-r--r--  mysql-test/r/func_time.result | 6
-rw-r--r--  mysql-test/r/function_defaults.result | 3067
-rw-r--r--  mysql-test/r/function_defaults_notembedded.result | 171
-rw-r--r--  mysql-test/r/group_by.result | 45
-rw-r--r--  mysql-test/r/have_debug_sync.require | 2
-rw-r--r--  mysql-test/r/information_schema.result | 6
-rw-r--r--  mysql-test/r/information_schema_all_engines.result | 2
-rw-r--r--  mysql-test/r/innodb_mysql_sync.result | 4
-rw-r--r--  mysql-test/r/is_debug_build.require | 2
-rw-r--r--  mysql-test/r/loaddata.result | 6
-rw-r--r--  mysql-test/r/log_slow.result | 2
-rw-r--r--  mysql-test/r/log_tables.result | 16
-rw-r--r--  mysql-test/r/log_tables_upgrade.result | 3
-rw-r--r--  mysql-test/r/lowercase_table2.result | 2
-rw-r--r--  mysql-test/r/mdev-504.result | 21
-rw-r--r--  mysql-test/r/myisam-system.result | 2
-rw-r--r--  mysql-test/r/myisam.result | 2
-rw-r--r--  mysql-test/r/myisampack.result | 2
-rw-r--r--  mysql-test/r/mysql_upgrade.result | 21
-rw-r--r--  mysql-test/r/mysql_upgrade_ssl.result | 3
-rw-r--r--  mysql-test/r/mysqlbinlog.result | 1
-rw-r--r--  mysql-test/r/mysqlcheck.result | 12
-rw-r--r--  mysql-test/r/mysqld--help.result | 17
-rw-r--r--  mysql-test/r/mysqldump-max.result | 10
-rw-r--r--  mysql-test/r/mysqldump.result | 4
-rw-r--r--  mysql-test/r/mysqltest.result | 2
-rw-r--r--  mysql-test/r/not_openssl.require | 2
-rw-r--r--  mysql-test/r/order_by.result | 885
-rw-r--r--  mysql-test/r/order_by_sortkey.result | 159
-rw-r--r--  mysql-test/r/parser.result | 2
-rw-r--r--  mysql-test/r/partition_datatype.result | 6
-rw-r--r--  mysql-test/r/partition_innodb_plugin.result | 18
-rw-r--r--  mysql-test/r/partition_open_files_limit.result | 2
-rw-r--r--  mysql-test/r/ps_1general.result | 2
-rw-r--r--  mysql-test/r/range_vs_index_merge.result | 2
-rw-r--r--  mysql-test/r/row-checksum-old.result | 2
-rw-r--r--  mysql-test/r/row-checksum.result | 2
-rw-r--r--  mysql-test/r/server_id.require | 2
-rw-r--r--  mysql-test/r/server_id1.require | 2
-rw-r--r--  mysql-test/r/show_explain.result | 1100
-rw-r--r--  mysql-test/r/show_explain_ps.result | 27
-rw-r--r--  mysql-test/r/signal_code.result | 16
-rw-r--r--  mysql-test/r/sp-code.result | 4
-rw-r--r--  mysql-test/r/sp.result | 14
-rw-r--r--  mysql-test/r/sql_mode.result | 2
-rw-r--r--  mysql-test/r/stat_tables.result | 383
-rw-r--r--  mysql-test/r/stat_tables_disabled.result | 70
-rw-r--r--  mysql-test/r/stat_tables_innodb.result | 412
-rw-r--r--  mysql-test/r/stat_tables_par.result | 242
-rw-r--r--  mysql-test/r/stat_tables_par_innodb.result | 253
-rw-r--r--  mysql-test/r/stat_tables_partition.result | 12
-rw-r--r--  mysql-test/r/stat_tables_rbr.result | 26
-rw-r--r--  mysql-test/r/stat_tables_repl.result | 41
-rw-r--r--  mysql-test/r/statistics.result | 1429
-rw-r--r--  mysql-test/r/status_user.result | 4
-rw-r--r--  mysql-test/r/str_to_datetime_457.result | 51
-rw-r--r--  mysql-test/r/subselect_innodb.result | 41
-rw-r--r--  mysql-test/r/subselect_mat_cost.result | 8
-rw-r--r--  mysql-test/r/symlink.result | 4
-rw-r--r--  mysql-test/r/system_mysql_db.result | 38
-rw-r--r--  mysql-test/r/system_mysql_db_fix40123.result | 38
-rw-r--r--  mysql-test/r/system_mysql_db_fix50030.result | 38
-rw-r--r--  mysql-test/r/system_mysql_db_fix50117.result | 38
-rw-r--r--  mysql-test/r/testdb_only.require | 2
-rw-r--r--  mysql-test/r/trigger.result | 2
-rw-r--r--  mysql-test/r/type_timestamp.result | 10
-rw-r--r--  mysql-test/r/type_timestamp_hires.result | 28
-rw-r--r--  mysql-test/r/user_var.result | 5
-rw-r--r--  mysql-test/r/variables.result | 4
-rw-r--r--  mysql-test/r/windows.require | 2
-rw-r--r--  mysql-test/r/xa_binlog.result | 2
-rw-r--r--  mysql-test/std_data/onerow.xml | 13
-rw-r--r--  mysql-test/suite/archive/archive.result | 2
-rw-r--r--  mysql-test/suite/binlog/r/binlog_checkpoint.result | 94
-rw-r--r--  mysql-test/suite/binlog/r/binlog_index.result | 4
-rw-r--r--  mysql-test/suite/binlog/r/binlog_ioerr.result | 5
-rw-r--r--  mysql-test/suite/binlog/r/binlog_mdev342.result | 1
-rw-r--r--  mysql-test/suite/binlog/r/binlog_mysqlbinlog2.result | 12
-rw-r--r--  mysql-test/suite/binlog/r/binlog_mysqlbinlog_row.result | 2
-rw-r--r--  mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_innodb.result | 8
-rw-r--r--  mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_myisam.result | 8
-rw-r--r--  mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_trans.result | 2
-rw-r--r--  mysql-test/suite/binlog/r/binlog_row_annotate.result | 13
-rw-r--r--  mysql-test/suite/binlog/r/binlog_row_binlog.result | 2
-rw-r--r--  mysql-test/suite/binlog/r/binlog_row_mysqlbinlog_options.result | 4
-rw-r--r--  mysql-test/suite/binlog/r/binlog_server_id.result | 3
-rw-r--r--  mysql-test/suite/binlog/r/binlog_stm_binlog.result | 2
-rw-r--r--  mysql-test/suite/binlog/r/binlog_stm_blackhole.result | 1
-rw-r--r--  mysql-test/suite/binlog/r/binlog_xa_recover.result | 202
-rw-r--r--  mysql-test/suite/binlog/t/binlog_checkpoint.test | 120
-rw-r--r--  mysql-test/suite/binlog/t/binlog_killed.test | 12
-rw-r--r--  mysql-test/suite/binlog/t/binlog_killed_simulate.test | 4
-rw-r--r--  mysql-test/suite/binlog/t/binlog_mysqlbinlog2.test | 38
-rw-r--r--  mysql-test/suite/binlog/t/binlog_xa_recover-master.opt | 1
-rw-r--r--  mysql-test/suite/binlog/t/binlog_xa_recover.test | 277
-rw-r--r--  mysql-test/suite/csv/csv.result | 6
-rw-r--r--  mysql-test/suite/engines/funcs/r/tc_rename_error.result | 2
-rw-r--r--  mysql-test/suite/engines/iuds/r/insert_time.result | 7
-rw-r--r--  mysql-test/suite/federated/federated_bug_35333.result | 6
-rw-r--r--  mysql-test/suite/funcs_1/datadict/processlist_priv.inc | 32
-rw-r--r--  mysql-test/suite/funcs_1/datadict/processlist_val.inc | 16
-rw-r--r--  mysql-test/suite/funcs_1/r/innodb_func_view.result | 4
-rw-r--r--  mysql-test/suite/funcs_1/r/is_columns_is.result | 4
-rw-r--r--  mysql-test/suite/funcs_1/r/is_columns_is_embedded.result | 4
-rw-r--r--  mysql-test/suite/funcs_1/r/is_columns_mysql.result | 38
-rw-r--r--  mysql-test/suite/funcs_1/r/is_columns_mysql_embedded.result | 38
-rw-r--r--  mysql-test/suite/funcs_1/r/is_engines_merge.result | 2
-rw-r--r--  mysql-test/suite/funcs_1/r/is_key_column_usage.result | 9
-rw-r--r--  mysql-test/suite/funcs_1/r/is_statistics.result | 9
-rw-r--r--  mysql-test/suite/funcs_1/r/is_statistics_mysql.result | 9
-rw-r--r--  mysql-test/suite/funcs_1/r/is_statistics_mysql_embedded.result | 18
-rw-r--r--  mysql-test/suite/funcs_1/r/is_table_constraints.result | 3
-rw-r--r--  mysql-test/suite/funcs_1/r/is_table_constraints_mysql.result | 3
-rw-r--r--  mysql-test/suite/funcs_1/r/is_table_constraints_mysql_embedded.result | 6
-rw-r--r--  mysql-test/suite/funcs_1/r/is_tables_mysql.result | 69
-rw-r--r--  mysql-test/suite/funcs_1/r/is_tables_mysql_embedded.result | 138
-rw-r--r--  mysql-test/suite/funcs_1/r/memory_func_view.result | 4
-rw-r--r--  mysql-test/suite/funcs_1/r/myisam_func_view.result | 4
-rw-r--r--  mysql-test/suite/funcs_1/r/processlist_priv_no_prot.result | 166
-rw-r--r--  mysql-test/suite/funcs_1/r/processlist_priv_ps.result | 166
-rw-r--r--  mysql-test/suite/funcs_1/r/processlist_val_no_prot.result | 48
-rw-r--r--  mysql-test/suite/funcs_1/r/processlist_val_ps.result | 48
-rw-r--r--  mysql-test/suite/innodb/r/binlog_consistent.result | 69
-rw-r--r--  mysql-test/suite/innodb/r/group_commit_binlog_pos.result | 3
-rw-r--r--  mysql-test/suite/innodb/r/group_commit_binlog_pos_no_optimize_thread.result | 3
-rw-r--r--  mysql-test/suite/innodb/r/group_commit_crash.result | 10
-rw-r--r--  mysql-test/suite/innodb/r/group_commit_crash_no_optimize_thread.result | 10
-rw-r--r--  mysql-test/suite/innodb/r/innodb-create-options.result | 300
-rw-r--r--  mysql-test/suite/innodb/r/innodb-index.result | 12
-rw-r--r--  mysql-test/suite/innodb/r/innodb-zip.result | 178
-rw-r--r--  mysql-test/suite/innodb/r/innodb.result | 10
-rw-r--r--  mysql-test/suite/innodb/r/innodb_bug12902967.result | 2
-rw-r--r--  mysql-test/suite/innodb/r/innodb_bug13635833.result | 2
-rw-r--r--  mysql-test/suite/innodb/r/innodb_bug21704.result | 8
-rw-r--r--  mysql-test/suite/innodb/r/innodb_bug46000.result | 2
-rw-r--r--  mysql-test/suite/innodb/r/innodb_corrupt_bit.result | 4
-rw-r--r--  mysql-test/suite/innodb/t/binlog_consistent.test | 1
-rw-r--r--  mysql-test/suite/innodb/t/group_commit_binlog_pos.test | 13
-rw-r--r--  mysql-test/suite/innodb/t/group_commit_binlog_pos_no_optimize_thread.test | 13
-rw-r--r--  mysql-test/suite/innodb/t/group_commit_crash.test | 2
-rw-r--r--  mysql-test/suite/innodb/t/group_commit_crash_no_optimize_thread.test | 2
-rw-r--r--  mysql-test/suite/innodb/t/innodb-create-options.test | 98
-rw-r--r--  mysql-test/suite/innodb/t/innodb-zip.test | 42
-rw-r--r--  mysql-test/suite/maria/maria-connect.result | 1
-rw-r--r--  mysql-test/suite/maria/maria-recovery2.result | 2
-rw-r--r--  mysql-test/suite/maria/maria-recovery2.test | 2
-rw-r--r--  mysql-test/suite/maria/maria.result | 2
-rw-r--r--  mysql-test/suite/multi_source/info_logs-master.opt | 1
-rw-r--r--  mysql-test/suite/multi_source/info_logs.result | 115
-rw-r--r--  mysql-test/suite/multi_source/info_logs.test | 178
-rw-r--r--  mysql-test/suite/multi_source/multisource.result | 140
-rw-r--r--  mysql-test/suite/multi_source/multisource.test | 230
-rw-r--r--  mysql-test/suite/multi_source/my.cnf | 25
-rw-r--r--  mysql-test/suite/multi_source/relaylog_events.result | 27
-rw-r--r--  mysql-test/suite/multi_source/relaylog_events.test | 51
-rw-r--r--  mysql-test/suite/multi_source/reset_master_slave.inc | 29
-rw-r--r--  mysql-test/suite/multi_source/reset_slave.result | 28
-rw-r--r--  mysql-test/suite/multi_source/reset_slave.test | 68
-rw-r--r--  mysql-test/suite/multi_source/simple.result | 77
-rw-r--r--  mysql-test/suite/multi_source/simple.test | 73
-rw-r--r--  mysql-test/suite/multi_source/skip_counter.result | 113
-rw-r--r--  mysql-test/suite/multi_source/skip_counter.test | 143
-rw-r--r--  mysql-test/suite/multi_source/status_vars.result | 97
-rw-r--r--  mysql-test/suite/multi_source/status_vars.test | 138
-rw-r--r--  mysql-test/suite/multi_source/syntax.result | 87
-rw-r--r--  mysql-test/suite/multi_source/syntax.test | 79
-rw-r--r--  mysql-test/suite/multi_source/wait_for_sql_thread_read_all.inc | 6
-rw-r--r--  mysql-test/suite/percona/percona_innodb_fake_changes.result | 8
-rw-r--r--  mysql-test/suite/perfschema/r/all_instances.result | 5
-rw-r--r--  mysql-test/suite/perfschema/r/dml_cond_instances.result | 4
-rw-r--r--  mysql-test/suite/perfschema/r/dml_events_waits_current.result | 4
-rw-r--r--  mysql-test/suite/perfschema/r/dml_events_waits_history.result | 4
-rw-r--r--  mysql-test/suite/perfschema/r/dml_events_waits_history_long.result | 4
-rw-r--r--  mysql-test/suite/perfschema/r/dml_ews_by_instance.result | 4
-rw-r--r--  mysql-test/suite/perfschema/r/dml_ews_by_thread_by_event_name.result | 4
-rw-r--r--  mysql-test/suite/perfschema/r/dml_ews_global_by_event_name.result | 4
-rw-r--r--  mysql-test/suite/perfschema/r/dml_file_instances.result | 4
-rw-r--r--  mysql-test/suite/perfschema/r/dml_file_summary_by_event_name.result | 4
-rw-r--r--  mysql-test/suite/perfschema/r/dml_file_summary_by_instance.result | 4
-rw-r--r--  mysql-test/suite/perfschema/r/dml_mutex_instances.result | 4
-rw-r--r--  mysql-test/suite/perfschema/r/dml_performance_timers.result | 4
-rw-r--r--  mysql-test/suite/perfschema/r/dml_rwlock_instances.result | 4
-rw-r--r--  mysql-test/suite/perfschema/r/dml_threads.result | 4
-rw-r--r--  mysql-test/suite/perfschema/r/misc.result | 4
-rw-r--r--  mysql-test/suite/perfschema/r/myisam_file_io.result | 1
-rw-r--r--  mysql-test/suite/perfschema/r/privilege.result | 64
-rw-r--r--  mysql-test/suite/perfschema/r/relaylog.result | 14
-rw-r--r--  mysql-test/suite/perfschema/t/myisam_file_io.test | 1
-rw-r--r--  mysql-test/suite/rpl/r/rpl_EE_err.result | 2
-rw-r--r--  mysql-test/suite/rpl/r/rpl_binlog_errors.result | 6
-rw-r--r--  mysql-test/suite/rpl/r/rpl_checksum.result | 2
-rw-r--r--  mysql-test/suite/rpl/r/rpl_conditional_comments.result | 20
-rw-r--r--  mysql-test/suite/rpl/r/rpl_deadlock_innodb.result | 2
-rw-r--r--  mysql-test/suite/rpl/r/rpl_drop_db.result | 4
-rw-r--r--  mysql-test/suite/rpl/r/rpl_filter_dbs_dynamic.result | 4
-rw-r--r--  mysql-test/suite/rpl/r/rpl_filter_tables_dynamic.result | 4
-rw-r--r--  mysql-test/suite/rpl/r/rpl_filter_wild_tables_dynamic.result | 4
-rw-r--r--  mysql-test/suite/rpl/r/rpl_flush_logs.result | 10
-rw-r--r--  mysql-test/suite/rpl/r/rpl_function_defaults.result | 132
-rw-r--r--  mysql-test/suite/rpl/r/rpl_heartbeat.result | 2
-rw-r--r--  mysql-test/suite/rpl/r/rpl_heartbeat_basic.result | 2
-rw-r--r--  mysql-test/suite/rpl/r/rpl_insert_delayed,stmt.rdiff | 2
-rw-r--r--  mysql-test/suite/rpl/r/rpl_loaddatalocal.result | 2
-rw-r--r--  mysql-test/suite/rpl/r/rpl_mariadb_slave_capability.result | 98
-rw-r--r--  mysql-test/suite/rpl/r/rpl_rewrt_db.result | 6
-rw-r--r--  mysql-test/suite/rpl/r/rpl_rotate_logs.result | 4
-rw-r--r--  mysql-test/suite/rpl/r/rpl_row_annotate_do.result | 1
-rw-r--r--  mysql-test/suite/rpl/r/rpl_row_annotate_dont.result | 1
-rw-r--r--  mysql-test/suite/rpl/r/rpl_row_log.result | 2
-rw-r--r--  mysql-test/suite/rpl/r/rpl_row_log_innodb.result | 2
-rw-r--r--  mysql-test/suite/rpl/r/rpl_row_max_relay_size.result | 7
-rw-r--r--  mysql-test/suite/rpl/r/rpl_row_show_relaylog_events.result | 82
-rw-r--r--  mysql-test/suite/rpl/r/rpl_skip_replication.result | 2
-rw-r--r--  mysql-test/suite/rpl/r/rpl_start_slave_deadlock_sys_vars.result | 31
-rw-r--r--  mysql-test/suite/rpl/r/rpl_stm_log.result | 2
-rw-r--r--  mysql-test/suite/rpl/r/rpl_stm_max_relay_size.result | 7
-rw-r--r--  mysql-test/suite/rpl/r/rpl_stm_mix_show_relaylog_events.result | 76
-rw-r--r--  mysql-test/suite/rpl/t/rpl_conditional_comments.test | 10
-rw-r--r--  mysql-test/suite/rpl/t/rpl_flush_logs.test | 24
-rw-r--r--  mysql-test/suite/rpl/t/rpl_function_defaults.test | 93
-rw-r--r--  mysql-test/suite/rpl/t/rpl_loaddatalocal.test | 2
-rw-r--r--  mysql-test/suite/rpl/t/rpl_mariadb_slave_capability.test | 104
-rw-r--r--  mysql-test/suite/rpl/t/rpl_semi_sync.test | 15
-rw-r--r--  mysql-test/suite/rpl/t/rpl_start_slave_deadlock_sys_vars.test | 57
-rw-r--r--  mysql-test/suite/storage_engine/misc.result | 9
-rw-r--r--  mysql-test/suite/sys_vars/r/default_master_connection_basic.result | 94
-rw-r--r--  mysql-test/suite/sys_vars/r/default_storage_engine_basic.result | 4
-rw-r--r--  mysql-test/suite/sys_vars/r/innodb_flush_log_at_trx_commit_basic.result | 10
-rw-r--r--  mysql-test/suite/sys_vars/r/max_relay_log_size_basic.result | 29
-rw-r--r--  mysql-test/suite/sys_vars/r/sql_slave_skip_counter_basic.result | 3
-rw-r--r--  mysql-test/suite/sys_vars/r/storage_engine_basic.result | 4
-rw-r--r--  mysql-test/suite/sys_vars/r/use_stat_tables_basic.result | 95
-rw-r--r--  mysql-test/suite/sys_vars/t/default_master_connection_basic.test | 129
-rw-r--r--  mysql-test/suite/sys_vars/t/max_relay_log_size_basic.test | 5
-rw-r--r--  mysql-test/suite/sys_vars/t/sql_slave_skip_counter_basic.test | 8
-rw-r--r--  mysql-test/suite/sys_vars/t/use_stat_tables_basic.test | 100
-rw-r--r--  mysql-test/suite/vcol/r/vcol_merge.result | 2
-rw-r--r--  mysql-test/t/alter_table_mdev539_maria.test | 7
-rw-r--r--  mysql-test/t/alter_table_mdev539_myisam.test | 7
-rw-r--r--  mysql-test/t/cassandra.test | 719
-rw-r--r--  mysql-test/t/comments.test | 12
-rw-r--r--  mysql-test/t/create.test | 4
-rw-r--r--  mysql-test/t/dyncol.test | 161
-rw-r--r--  mysql-test/t/file_contents.test | 6
-rw-r--r--  mysql-test/t/filesort_debug.test | 2
-rw-r--r--  mysql-test/t/flush.test | 8
-rw-r--r--  mysql-test/t/func_encrypt_nossl.test | 2
-rw-r--r--  mysql-test/t/func_system.test | 6
-rw-r--r--  mysql-test/t/func_time.test | 2
-rw-r--r--  mysql-test/t/function_defaults.test | 23
-rw-r--r--  mysql-test/t/function_defaults_notembedded.test | 18
-rw-r--r--  mysql-test/t/group_by.test | 49
-rw-r--r--  mysql-test/t/mdev-504.test | 77
-rw-r--r--  mysql-test/t/mysql.test | 12
-rw-r--r--  mysql-test/t/mysql_upgrade.test | 2
-rw-r--r--  mysql-test/t/mysqldump-max.test | 2
-rw-r--r--  mysql-test/t/order_by.test | 201
-rw-r--r--  mysql-test/t/order_by_sortkey.test | 64
-rw-r--r--  mysql-test/t/parser.test | 2
-rw-r--r--  mysql-test/t/parser_not_embedded.test | 12
-rw-r--r--  mysql-test/t/partition_innodb_plugin.test | 1
-rw-r--r--  mysql-test/t/show_explain.test | 1138
-rw-r--r--  mysql-test/t/show_explain_ps.test | 51
-rw-r--r--  mysql-test/t/sp.test | 14
-rw-r--r--  mysql-test/t/stat_tables.test | 207
-rw-r--r--  mysql-test/t/stat_tables_disabled.test | 78
-rw-r--r--  mysql-test/t/stat_tables_innodb.test | 12
-rw-r--r--  mysql-test/t/stat_tables_par.test | 278
-rw-r--r--  mysql-test/t/stat_tables_par_innodb.test | 12
-rw-r--r--  mysql-test/t/stat_tables_partition.test | 17
-rw-r--r--  mysql-test/t/stat_tables_rbr.test | 31
-rw-r--r--  mysql-test/t/stat_tables_repl.test | 58
-rw-r--r--  mysql-test/t/statistics.test | 564
-rw-r--r--  mysql-test/t/str_to_datetime_457.test | 26
-rw-r--r--  mysql-test/t/subselect_innodb.test | 48
-rw-r--r--  mysql-test/t/system_mysql_db_fix40123.test | 7
-rw-r--r--  mysql-test/t/system_mysql_db_fix50030.test | 8
-rw-r--r--  mysql-test/t/system_mysql_db_fix50117.test | 8
-rw-r--r--  mysql-test/t/type_timestamp.test | 11
-rw-r--r--  mysql-test/t/type_timestamp_hires.test | 15
-rw-r--r--  mysql-test/t/user_var.test | 14
-rw-r--r--  mysql-test/t/xa_binlog.test | 2
-rw-r--r--  mysys/array.c | 34
-rw-r--r--  mysys/default.c | 6
-rw-r--r--  mysys/errors.c | 113
-rw-r--r--  mysys/hash.c | 6
-rw-r--r--  mysys/ma_dyncol.c | 3556
-rw-r--r--  mysys/mf_fn_ext.c | 37
-rw-r--r--  mysys/mf_radix.c | 5
-rw-r--r--  mysys/mf_sort.c | 2
-rw-r--r--  mysys/mf_tempdir.c | 2
-rw-r--r--  mysys/my_alloc.c | 37
-rw-r--r--  mysys/my_chmod.c | 2
-rw-r--r--  mysys/my_chsize.c | 2
-rw-r--r--  mysys/my_copy.c | 2
-rw-r--r--  mysys/my_create.c | 2
-rw-r--r--  mysys/my_delete.c | 2
-rw-r--r--  mysys/my_error.c | 6
-rw-r--r--  mysys/my_fopen.c | 6
-rw-r--r--  mysys/my_fstream.c | 8
-rw-r--r--  mysys/my_getwd.c | 4
-rw-r--r--  mysys/my_lib.c | 17
-rw-r--r--  mysys/my_lock.c | 2
-rw-r--r--  mysys/my_malloc.c | 126
-rw-r--r--  mysys/my_open.c | 4
-rw-r--r--  mysys/my_pread.c | 4
-rw-r--r--  mysys/my_read.c | 2
-rw-r--r--  mysys/my_redel.c | 2
-rw-r--r--  mysys/my_rename.c | 2
-rw-r--r--  mysys/my_seek.c | 4
-rw-r--r--  mysys/my_sync.c | 7
-rw-r--r--  mysys/my_thr_init.c | 7
-rw-r--r--  mysys/my_write.c | 2
-rw-r--r--  mysys/mysys_priv.h | 9
-rw-r--r--  mysys/safemalloc.c | 116
-rw-r--r--  mysys/string.c | 104
-rw-r--r--  mysys/tree.c | 15
-rw-r--r--  mysys/waiting_threads.c | 4
-rw-r--r--  plugin/feedback/utils.cc | 2
-rw-r--r--  plugin/semisync/semisync_master.cc | 1
-rw-r--r--  plugin/semisync/semisync_master_plugin.cc | 18
-rw-r--r--  scripts/mysql_system_tables.sql | 6
-rw-r--r--  sql-common/client.c | 12
-rw-r--r--  sql-common/client_plugin.c | 2
-rw-r--r--  sql-common/my_time.c | 409
-rw-r--r--  sql/CMakeLists.txt | 5
-rw-r--r--  sql/bounded_queue.h | 195
-rw-r--r--  sql/debug_sync.cc | 4
-rw-r--r--  sql/event_data_objects.cc | 2
-rw-r--r--  sql/event_db_repository.cc | 16
-rw-r--r--  sql/event_scheduler.cc | 16
-rw-r--r--  sql/events.cc | 35
-rw-r--r--  sql/field.cc | 371
-rw-r--r--  sql/field.h | 180
-rw-r--r--  sql/filesort.cc | 577
-rw-r--r--  sql/filesort.h | 4
-rw-r--r--  sql/filesort_utils.cc | 143
-rw-r--r--  sql/filesort_utils.h | 129
-rw-r--r--  sql/ha_ndbcluster.cc | 13
-rw-r--r--  sql/ha_ndbcluster_binlog.cc | 6
-rw-r--r--  sql/ha_partition.cc | 38
-rw-r--r--  sql/ha_partition.h | 5
-rw-r--r--  sql/handler.cc | 55
-rw-r--r--  sql/handler.h | 51
-rw-r--r--  sql/item.cc | 11
-rw-r--r--  sql/item.h | 2
-rw-r--r--  sql/item_buff.cc | 9
-rw-r--r--  sql/item_cmpfunc.cc | 72
-rw-r--r--  sql/item_cmpfunc.h | 8
-rw-r--r--  sql/item_create.cc | 118
-rw-r--r--  sql/item_create.h | 1
-rw-r--r--  sql/item_func.cc | 43
-rw-r--r--  sql/item_func.h | 5
-rw-r--r--  sql/item_strfunc.cc | 323
-rw-r--r--  sql/item_strfunc.h | 28
-rw-r--r--  sql/item_subselect.cc | 12
-rw-r--r--  sql/item_sum.cc | 20
-rw-r--r--  sql/keycaches.cc | 4
-rw-r--r--  sql/lex.h | 4
-rw-r--r--  sql/log.cc | 1147
-rw-r--r--  sql/log.h | 136
-rw-r--r--  sql/log_event.cc | 225
-rw-r--r--  sql/log_event.h | 73
-rw-r--r--  sql/log_event_old.cc | 36
-rw-r--r--  sql/mdl.cc | 3
-rw-r--r--  sql/multi_range_read.cc | 2
-rw-r--r--  sql/my_apc.cc | 270
-rw-r--r--  sql/my_apc.h | 138
-rw-r--r--  sql/mysqld.cc | 404
-rw-r--r--  sql/mysqld.h | 20
-rw-r--r--  sql/net_serv.cc | 15
-rw-r--r--  sql/opt_range.cc | 72
-rw-r--r--  sql/opt_subselect.cc | 5
-rw-r--r--  sql/protocol.cc | 2
-rw-r--r--  sql/protocol.h | 5
-rw-r--r--  sql/records.cc | 2
-rw-r--r--  sql/rpl_filter.cc | 2
-rw-r--r--  sql/rpl_handler.cc | 2
-rw-r--r--  sql/rpl_handler.h | 2
-rw-r--r--  sql/rpl_mi.cc | 618
-rw-r--r--  sql/rpl_mi.h | 54
-rw-r--r--  sql/rpl_rli.cc | 75
-rw-r--r--  sql/rpl_rli.h | 6
-rw-r--r--  sql/rpl_tblmap.cc | 2
-rw-r--r--  sql/rpl_utility.cc | 2
-rw-r--r--  sql/set_var.cc | 2
-rw-r--r--  sql/set_var.h | 4
-rw-r--r--  sql/share/errmsg-utf8.txt | 1220
-rw-r--r--  sql/slave.cc | 377
-rw-r--r--  sql/slave.h | 9
-rw-r--r--  sql/sp.cc | 34
-rw-r--r--  sql/sp_head.cc | 15
-rw-r--r--  sql/sp_pcontext.cc | 40
-rw-r--r--  sql/sql_acl.cc | 212
-rw-r--r--  sql/sql_admin.cc | 93
-rw-r--r--  sql/sql_analyse.h | 14
-rw-r--r--  sql/sql_array.h | 74
-rw-r--r--  sql/sql_audit.cc | 2
-rw-r--r--  sql/sql_base.cc | 194
-rw-r--r--  sql/sql_base.h | 20
-rw-r--r--  sql/sql_cache.cc | 4
-rw-r--r--  sql/sql_class.cc | 152
-rw-r--r--  sql/sql_class.h | 182
-rw-r--r--  sql/sql_const.h | 1
-rw-r--r--  sql/sql_db.cc | 12
-rw-r--r--  sql/sql_delete.cc | 13
-rw-r--r--  sql/sql_error.cc | 25
-rw-r--r--  sql/sql_error.h | 8
-rw-r--r--  sql/sql_expression_cache.cc | 2
-rw-r--r--  sql/sql_handler.cc | 2
-rw-r--r--  sql/sql_insert.cc | 203
-rw-r--r--  sql/sql_join_cache.cc | 1003
-rw-r--r--  sql/sql_lex.cc | 157
-rw-r--r--  sql/sql_lex.h | 29
-rw-r--r--  sql/sql_load.cc | 74
-rw-r--r--  sql/sql_parse.cc | 283
-rw-r--r--  sql/sql_partition.cc | 3
-rw-r--r--  sql/sql_plugin.cc | 23
-rw-r--r--  sql/sql_prepare.cc | 9
-rw-r--r--  sql/sql_priv.h | 2
-rw-r--r--  sql/sql_reload.cc | 53
-rw-r--r--  sql/sql_rename.cc | 7
-rw-r--r--  sql/sql_repl.cc | 265
-rw-r--r--  sql/sql_select.cc | 735
-rw-r--r--  sql/sql_select.h | 26
-rw-r--r--  sql/sql_servers.cc | 6
-rw-r--r--  sql/sql_show.cc | 333
-rw-r--r--  sql/sql_show.h | 26
-rw-r--r--  sql/sql_sort.h | 50
-rw-r--r--  sql/sql_statistics.cc | 3056
-rw-r--r--  sql/sql_statistics.h | 248
-rw-r--r--  sql/sql_string.cc | 105
-rw-r--r--  sql/sql_string.h | 28
-rw-r--r--  sql/sql_table.cc | 225
-rw-r--r--  sql/sql_table.h | 4
-rw-r--r--  sql/sql_test.cc | 4
-rw-r--r--  sql/sql_trigger.cc | 4
-rw-r--r--  sql/sql_udf.cc | 51
-rw-r--r--  sql/sql_union.cc | 4
-rw-r--r--  sql/sql_update.cc | 57
-rw-r--r--  sql/sql_yacc.yy | 239
-rw-r--r--  sql/strfunc.cc | 1
-rw-r--r--  sql/structs.h | 21
-rw-r--r--  sql/sys_vars.cc | 304
-rw-r--r--  sql/sys_vars.h | 185
-rw-r--r--  sql/table.cc | 189
-rw-r--r--  sql/table.h | 117
-rw-r--r--  sql/thr_malloc.cc | 5
-rw-r--r--  sql/thr_malloc.h | 3
-rw-r--r--  sql/tztime.cc | 10
-rw-r--r--  sql/uniques.cc | 25
-rw-r--r--  sql/unireg.cc | 8
-rw-r--r--  storage/archive/ha_archive.cc | 4
-rw-r--r--  storage/archive/ha_archive.h | 3
-rw-r--r--  storage/blackhole/ha_blackhole.h | 2
-rw-r--r--  storage/cassandra/CMakeLists.txt | 49
-rw-r--r--  storage/cassandra/cassandra.cnf | 2
-rw-r--r--  storage/cassandra/cassandra_se.cc | 800
-rw-r--r--  storage/cassandra/cassandra_se.h | 123
-rw-r--r--  storage/cassandra/gen-cpp/Cassandra.cpp | 12871
-rw-r--r--  storage/cassandra/gen-cpp/Cassandra.h | 5466
-rw-r--r--  storage/cassandra/gen-cpp/Cassandra_server.skeleton.cpp | 219
-rw-r--r--  storage/cassandra/gen-cpp/cassandra_constants.cpp | 18
-rw-r--r--  storage/cassandra/gen-cpp/cassandra_constants.h | 26
-rw-r--r--  storage/cassandra/gen-cpp/cassandra_types.cpp | 3512
-rw-r--r--  storage/cassandra/gen-cpp/cassandra_types.h | 2149
-rw-r--r--  storage/cassandra/ha_cassandra.cc | 2619
-rw-r--r--  storage/cassandra/ha_cassandra.h | 275
-rw-r--r--  storage/csv/ha_tina.cc | 10
-rw-r--r--  storage/csv/ha_tina.h | 1
-rw-r--r--  storage/example/ha_example.cc | 5
-rw-r--r--  storage/example/ha_example.h | 5
-rw-r--r--  storage/federated/ha_federated.cc | 9
-rw-r--r--  storage/federated/ha_federated.h | 4
-rw-r--r--  storage/federatedx/federatedx_io_mysql.cc | 2
-rw-r--r--  storage/federatedx/ha_federatedx.cc | 10
-rw-r--r--  storage/federatedx/ha_federatedx.h | 4
-rw-r--r--  storage/heap/ha_heap.cc | 6
-rw-r--r--  storage/heap/ha_heap.h | 5
-rw-r--r--  storage/heap/heapdef.h | 3
-rw-r--r--  storage/heap/hp_block.c | 8
-rw-r--r--  storage/heap/hp_create.c | 9
-rw-r--r--  storage/heap/hp_open.c | 4
-rw-r--r--  storage/heap/hp_write.c | 4
-rw-r--r--  storage/innobase/handler/ha_innodb.cc | 203
-rw-r--r--  storage/innobase/include/ha_prototypes.h | 11
-rw-r--r--  storage/innobase/include/log0log.h | 7
-rw-r--r--  storage/innobase/include/log0log.ic | 19
-rw-r--r--  storage/innobase/log/log0log.c | 8
-rw-r--r--  storage/innobase/mysql-test/storage_engine/alter_tablespace.rdiff | 11
-rw-r--r--  storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff | 2
-rw-r--r--  storage/innobase/trx/trx0trx.c | 11
-rw-r--r--  storage/maria/ha_maria.cc | 30
-rw-r--r--  storage/maria/ha_maria.h | 4
-rw-r--r--  storage/maria/ma_bitmap.c | 2
-rw-r--r--  storage/maria/ma_blockrec.c | 2
-rw-r--r--  storage/maria/ma_check.c | 26
-rw-r--r--  storage/maria/ma_ft_boolean_search.c | 4
-rw-r--r--  storage/maria/ma_ft_nlq_search.c | 4
-rw-r--r--  storage/maria/ma_ft_parser.c | 6
-rw-r--r--  storage/maria/ma_loghandler.c | 6
-rw-r--r--  storage/maria/ma_open.c | 2
-rw-r--r--  storage/maria/ma_sort.c | 4
-rw-r--r--  storage/maria/ma_write.c | 6
-rw-r--r--  storage/maria/maria_def.h | 4
-rw-r--r--  storage/maria/maria_pack.c | 4
-rw-r--r--  storage/maria/unittest/sequence_storage.c | 2
-rw-r--r--  storage/myisam/ft_boolean_search.c | 4
-rw-r--r--  storage/myisam/ft_nlq_search.c | 4
-rw-r--r--  storage/myisam/ft_parser.c | 5
-rw-r--r--  storage/myisam/ft_stopwords.c | 3
-rw-r--r--  storage/myisam/ha_myisam.cc | 28
-rw-r--r--  storage/myisam/ha_myisam.h | 3
-rw-r--r--  storage/myisam/mi_check.c | 25
-rw-r--r--  storage/myisam/mi_write.c | 6
-rw-r--r--  storage/myisam/myisamdef.h | 8
-rw-r--r--  storage/myisam/myisamlog.c | 5
-rw-r--r--  storage/myisam/myisampack.c | 4
-rw-r--r--  storage/myisam/sort.c | 4
-rw-r--r--  storage/myisammrg/ha_myisammrg.cc | 25
-rw-r--r--  storage/myisammrg/ha_myisammrg.h | 1
-rw-r--r--  storage/myisammrg/mysql-test/storage_engine/vcol.rdiff | 2
-rw-r--r--  storage/oqgraph/ha_oqgraph.h | 4
-rw-r--r--  storage/perfschema/ha_perfschema.h | 2
-rw-r--r--  storage/perfschema/pfs_check.cc | 2
-rw-r--r--  storage/perfschema/pfs_instr.cc | 3
-rw-r--r--  storage/sphinx/ha_sphinx.cc | 12
-rw-r--r--  storage/xtradb/handler/ha_innodb.cc | 240
-rw-r--r--  storage/xtradb/handler/ha_innodb.h | 1
-rw-r--r--  storage/xtradb/include/ha_prototypes.h | 11
-rw-r--r--  storage/xtradb/include/log0log.h | 7
-rw-r--r--  storage/xtradb/include/log0log.ic | 19
-rw-r--r--  storage/xtradb/include/trx0trx.h | 1
-rw-r--r--  storage/xtradb/log/log0log.c | 7
-rw-r--r--  storage/xtradb/trx/trx0trx.c | 11
-rw-r--r--  strings/my_vsnprintf.c | 188
-rw-r--r--  support-files/compiler_warnings.supp | 15
-rw-r--r--  unittest/mysys/my_vsnprintf-t.c | 35
-rw-r--r--  unittest/sql/CMakeLists.txt | 3
-rw-r--r--  unittest/sql/my_apc-t.cc | 227
632 files changed, 66405 insertions(+), 6873 deletions(-)
diff --git a/.bzrignore b/.bzrignore
index 74c81c48e05..dedd26a8fb9 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -1136,6 +1136,7 @@ plugin/handler_socket/perl-Net-HandlerSocket/Makefile.PL
libmysqld/libmysqld_exports_file.cc
libmysqld/gcalc_slicescan.cc
libmysqld/gcalc_tools.cc
+libmysqld/my_apc.cc
sql/share/errmsg.sys
sql/share/mysql
install_manifest.txt
diff --git a/BUILD/SETUP.sh b/BUILD/SETUP.sh
index 802c5788c08..a4853c823ab 100755
--- a/BUILD/SETUP.sh
+++ b/BUILD/SETUP.sh
@@ -63,6 +63,8 @@ parse_options()
EXTRA_CXXFLAGS=`get_key_value "$1"`;;
--extra-configs=*)
EXTRA_CONFIGS=`get_key_value "$1"`;;
+ --extra-makeflags=*)
+ EXTRA_MAKEFLAGS=`get_key_value "$1"`;;
-c | --just-configure)
just_configure=1;;
-n | --just-print | --print)
@@ -145,7 +147,7 @@ else
# C warnings
c_warnings="$warnings"
# C++ warnings
- cxx_warnings="$warnings -Wno-unused-parameter"
+ cxx_warnings="$warnings -Wno-unused-parameter -Wno-invalid-offsetof"
# cxx_warnings="$cxx_warnings -Woverloaded-virtual -Wsign-promo"
cxx_warnings="$cxx_warnings -Wnon-virtual-dtor"
debug_extra_cflags="-O0 -g3 -gdwarf-2"
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ff9e4fdfece..1b29c559415 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -106,7 +106,7 @@ ENDIF()
#
INCLUDE(maintainer)
-SET(MYSQL_MAINTAINER_MODE "AUTO" CACHE STRING "MySQL maintainer-specific development environment. Options are: ON OFF AUTO.")
+SET(MYSQL_MAINTAINER_MODE "OFF" CACHE STRING "MySQL maintainer-specific development environment. Options are: ON OFF AUTO.")
MARK_AS_ADVANCED(MYSQL_MAINTAINER_MODE)
# Whether the maintainer mode compiler options should be enabled.
@@ -266,16 +266,18 @@ MYSQL_CHECK_SSL()
MYSQL_CHECK_READLINE()
#
-# Setup maintainer mode options by the end. Platform checks are
+# Setup maintainer mode options. Platform checks are
# not run with the warning options as to not perturb fragile checks
# (i.e. do not make warnings into errors).
+# We have to prepend the MY_MAINTAINER_* warning flags to ensure that the
+# flags given by the invoking user are honored
#
IF(MYSQL_MAINTAINER_MODE MATCHES "ON")
- SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${MY_MAINTAINER_C_WARNINGS}")
- SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MY_MAINTAINER_CXX_WARNINGS}")
+ SET(CMAKE_C_FLAGS "${MY_MAINTAINER_C_WARNINGS} ${CMAKE_C_FLAGS}")
+ SET(CMAKE_CXX_FLAGS "${MY_MAINTAINER_CXX_WARNINGS} ${CMAKE_CXX_FLAGS}")
ELSEIF(MYSQL_MAINTAINER_MODE MATCHES "AUTO")
- SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${MY_MAINTAINER_C_WARNINGS}")
- SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${MY_MAINTAINER_CXX_WARNINGS}")
+ SET(CMAKE_C_FLAGS_DEBUG "${MY_MAINTAINER_C_WARNINGS} ${CMAKE_C_FLAGS_DEBUG}")
+ SET(CMAKE_CXX_FLAGS_DEBUG "${MY_MAINTAINER_CXX_WARNINGS} ${CMAKE_CXX_FLAGS_DEBUG}")
ENDIF()
IF(WITH_UNIT_TESTS)
@@ -284,6 +286,7 @@ IF(WITH_UNIT_TESTS)
ADD_SUBDIRECTORY(unittest/strings)
ADD_SUBDIRECTORY(unittest/examples)
ADD_SUBDIRECTORY(unittest/mysys)
+ ADD_SUBDIRECTORY(unittest/sql)
ENDIF()
IF(NOT WITHOUT_SERVER)
diff --git a/VERSION b/VERSION
index 1889f4c8aff..2dacb98ef55 100644
--- a/VERSION
+++ b/VERSION
@@ -1,4 +1,8 @@
-MYSQL_VERSION_MAJOR=5
-MYSQL_VERSION_MINOR=5
-MYSQL_VERSION_PATCH=29
+# Version number for MariaDB is maintained here.
+# The version string is created from:
+# MYSQL_VERSION_MAJOR.MYSQL_VERSION_MINOR.MYSQL_VERSION_PATCH-MYSQL_VERSION_EXTRA
+#
+MYSQL_VERSION_MAJOR=10
+MYSQL_VERSION_MINOR=0
+MYSQL_VERSION_PATCH=1
MYSQL_VERSION_EXTRA=
diff --git a/client/completion_hash.cc b/client/completion_hash.cc
index 9ffb2082c06..c170b69de2d 100644
--- a/client/completion_hash.cc
+++ b/client/completion_hash.cc
@@ -49,7 +49,7 @@ int completion_hash_init(HashTable *ht, uint nSize)
ht->initialized = 0;
return FAILURE;
}
- init_alloc_root(&ht->mem_root, 8192, 0);
+ init_alloc_root(&ht->mem_root, 8192, 0, MYF(0));
ht->pHashFunction = hashpjw;
ht->nTableSize = nSize;
ht->initialized = 1;
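
Note (not part of the diff): init_alloc_root() calls throughout this change gain a trailing myf argument, so allocation flags can be threaded through to every block the MEM_ROOT later grabs. A minimal standalone sketch of the assumed four-argument convention; MY_THREAD_SPECIFIC as an example flag is an assumption, not something this hunk shows:

#include <my_global.h>
#include <my_sys.h>
#include <string.h>

/* Sketch only: copy a string into a MEM_ROOT initialized with the
   assumed 4-argument init_alloc_root(). */
static char *dup_into_root(MEM_ROOT *root, const char *s)
{
  char *copy= (char*) alloc_root(root, strlen(s) + 1);
  if (copy)
    strcpy(copy, s);
  return copy;
}

int main(int argc, char **argv)
{
  MEM_ROOT root;
  (void) argc;
  MY_INIT(argv[0]);                       /* set up mysys */
  /* 8K blocks, no pre-allocation, default flags; a caller could pass
     e.g. MYF(MY_THREAD_SPECIFIC) instead (assumed example flag). */
  init_alloc_root(&root, 8192, 0, MYF(0));
  dup_into_root(&root, "hello");
  free_root(&root, MYF(0));               /* releases all blocks at once */
  my_end(0);
  return 0;
}
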
diff --git a/client/mysql.cc b/client/mysql.cc
index 67878b36227..38054668411 100644
--- a/client/mysql.cc
+++ b/client/mysql.cc
@@ -1157,7 +1157,7 @@ int main(int argc,char *argv[])
}
glob_buffer.realloc(512);
completion_hash_init(&ht, 128);
- init_alloc_root(&hash_mem_root, 16384, 0);
+ init_alloc_root(&hash_mem_root, 16384, 0, MYF(0));
bzero((char*) &mysql, sizeof(mysql));
if (sql_connect(current_host,current_db,current_user,opt_password,
opt_silent))
diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc
index e2ebb883f77..61045518a17 100644
--- a/client/mysqladmin.cc
+++ b/client/mysqladmin.cc
@@ -25,7 +25,7 @@
#include <sql_common.h>
#include <welcome_copyright_notice.h> /* ORACLE_WELCOME_COPYRIGHT_NOTICE */
-#define ADMIN_VERSION "9.0"
+#define ADMIN_VERSION "9.1"
#define MAX_MYSQL_VAR 512
#define SHUTDOWN_DEF_TIMEOUT 3600 /* Wait for shutdown */
#define MAX_TRUNC_LENGTH 3
@@ -99,6 +99,7 @@ enum commands {
ADMIN_FLUSH_HOSTS, ADMIN_FLUSH_TABLES, ADMIN_PASSWORD,
ADMIN_PING, ADMIN_EXTENDED_STATUS, ADMIN_FLUSH_STATUS,
ADMIN_FLUSH_PRIVILEGES, ADMIN_START_SLAVE, ADMIN_STOP_SLAVE,
+ ADMIN_START_ALL_SLAVES, ADMIN_STOP_ALL_SLAVES,
ADMIN_FLUSH_THREADS, ADMIN_OLD_PASSWORD, ADMIN_FLUSH_SLOW_LOG,
ADMIN_FLUSH_TABLE_STATISTICS, ADMIN_FLUSH_INDEX_STATISTICS,
ADMIN_FLUSH_USER_STATISTICS, ADMIN_FLUSH_CLIENT_STATISTICS,
@@ -112,6 +113,7 @@ static const char *command_names[]= {
"flush-hosts", "flush-tables", "password",
"ping", "extended-status", "flush-status",
"flush-privileges", "start-slave", "stop-slave",
+ "start-all-slaves", "stop-all-slaves",
"flush-threads", "old-password", "flush-slow-log",
"flush-table-statistics", "flush-index-statistics",
"flush-user-statistics", "flush-client-statistics",
@@ -1113,26 +1115,67 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
}
case ADMIN_START_SLAVE:
- if (mysql_query(mysql, "START SLAVE"))
+ case ADMIN_START_ALL_SLAVES:
+ {
+ my_bool many_slaves= 0;
+ const char *query= "START SLAVE";
+ if (command == ADMIN_START_ALL_SLAVES && mariadb_connection(mysql) &&
+ mysql_get_server_version(mysql) >= 100000)
+ {
+ query="START ALL SLAVES";
+ many_slaves= 1;
+ }
+
+ if (mysql_query(mysql, query))
{
my_printf_error(0, "Error starting slave: %s", error_flags,
mysql_error(mysql));
return -1;
}
+ else if (!many_slaves || mysql_warning_count(mysql) > 0)
+ {
+ if (!option_silent)
+      puts("Slave(s) started");
+ }
else
- puts("Slave started");
+ {
+ if (!option_silent)
+ puts("No slaves to start");
+ }
break;
+ }
case ADMIN_STOP_SLAVE:
- if (mysql_query(mysql, "STOP SLAVE"))
+ case ADMIN_STOP_ALL_SLAVES:
+ {
+ const char *query= "STOP SLAVE";
+ my_bool many_slaves= 0;
+
+ if (command == ADMIN_STOP_ALL_SLAVES && mariadb_connection(mysql) &&
+ mysql_get_server_version(mysql) >= 100000)
+ {
+ query="STOP ALL SLAVES";
+ many_slaves= 1;
+ }
+
+ if (mysql_query(mysql, query))
{
my_printf_error(0, "Error stopping slave: %s", error_flags,
mysql_error(mysql));
return -1;
}
+ else if (!many_slaves || mysql_warning_count(mysql) > 0)
+ {
+    /* We can't detect whether there were any slaves to stop with STOP SLAVE */
+ if (many_slaves && !option_silent)
+        puts("Slave(s) stopped");
+ }
else
- puts("Slave stopped");
+ {
+ if (!option_silent)
+        puts("All slaves were already stopped");
+ }
break;
-
+ }
case ADMIN_PING:
mysql->reconnect=0; /* We want to know of reconnects */
if (!mysql_ping(mysql))
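
Note on the version gate used by both new cases above: mysql_get_server_version() returns major*10000 + minor*100 + patch, so the literal 100000 means server 10.0.0, the first MariaDB release that accepts START/STOP ALL SLAVES. A compressed sketch of the same check (mariadb_connection() is the sql_common.h helper the patch itself uses):

#include <my_global.h>
#include <mysql.h>
#include <sql_common.h>

/* Pick the multi-source statement only on MariaDB >= 10.0; older
   servers get the classic single-slave statement. */
static const char *slave_start_query(MYSQL *mysql, my_bool *many_slaves)
{
  *many_slaves= mariadb_connection(mysql) &&
                mysql_get_server_version(mysql) >= 100000; /* 10.0.0 */
  return *many_slaves ? "START ALL SLAVES" : "START SLAVE";
}
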
diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc
index 777e68902dd..87198347b10 100644
--- a/client/mysqlbinlog.cc
+++ b/client/mysqlbinlog.cc
@@ -278,8 +278,8 @@ public:
int init()
{
- return init_dynamic_array(&file_names, sizeof(File_name_record),
- 100, 100);
+ return my_init_dynamic_array(&file_names, sizeof(File_name_record),
+ 100, 100, MYF(0));
}
void init_by_dir_name(const char *dir)
@@ -1773,7 +1773,7 @@ static Exit_status check_master_version()
{
MYSQL_RES* res = 0;
MYSQL_ROW row;
- const char* version;
+ uint version;
if (mysql_query(mysql, "SELECT VERSION()") ||
!(res = mysql_store_result(mysql)))
@@ -1789,7 +1789,7 @@ static Exit_status check_master_version()
goto err;
}
- if (!(version = row[0]))
+ if (!(version = atoi(row[0])))
{
error("Could not find server version: "
"Master reported NULL for the version.");
@@ -1807,15 +1807,29 @@ static Exit_status check_master_version()
"Master returned '%s'", mysql_error(mysql));
goto err;
}
+
+ /*
+ Announce our capabilities to the server, so it will send us all the events
+ that we know about.
+ */
+ if (mysql_query(mysql, "SET @mariadb_slave_capability="
+ STRINGIFY_ARG(MARIA_SLAVE_CAPABILITY_MINE)))
+ {
+ error("Could not inform master about capability. Master returned '%s'",
+ mysql_error(mysql));
+ goto err;
+ }
+
delete glob_description_event;
- switch (*version) {
- case '3':
+ switch (version) {
+ case 3:
glob_description_event= new Format_description_log_event(1);
break;
- case '4':
+ case 4:
glob_description_event= new Format_description_log_event(3);
break;
- case '5':
+ case 5:
+ case 10:
/*
The server is soon going to send us its Format_description log
event, unless it is a 5.0 server with 3.23 or 4.0 binlogs.
@@ -1827,7 +1841,7 @@ static Exit_status check_master_version()
default:
glob_description_event= NULL;
error("Could not find server version: "
- "Master reported unrecognized MySQL version '%s'.", version);
+ "Master reported unrecognized MySQL version '%s'.", row[0]);
goto err;
}
if (!glob_description_event || !glob_description_event->is_valid())
@@ -2379,7 +2393,7 @@ int main(int argc, char** argv)
my_init_time(); // for time functions
- init_alloc_root(&s_mem_root, 16384, 0);
+ init_alloc_root(&s_mem_root, 16384, 0, MYF(0));
if (load_defaults("my", load_groups, &argc, &argv))
exit(1);
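
Note: the switch above used to dispatch on the first character of VERSION(), which silently breaks once the major version has two digits: "10.0.1-MariaDB" starts with '1' and matched no case. Parsing with atoi() keeps the dispatch correct; a standalone demonstration:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  const char *versions[]= { "3.23.58", "4.1.25", "5.5.29", "10.0.1-MariaDB" };
  int i;
  for (i= 0; i < 4; i++)
  {
    /* atoi() stops at the first '.', yielding the full major version,
       where versions[i][0] would reduce "10..." to just '1'. */
    printf("%-16s -> major %d\n", versions[i], atoi(versions[i]));
  }
  return 0;
}
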
diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c
index 4218f2da62c..8d1122dc0fd 100644
--- a/client/mysqlcheck.c
+++ b/client/mysqlcheck.c
@@ -994,8 +994,10 @@ int main(int argc, char **argv)
}
if (opt_auto_repair &&
- (my_init_dynamic_array(&tables4repair, sizeof(char)*(NAME_LEN*2+2),16,64) ||
- my_init_dynamic_array(&tables4rebuild, sizeof(char)*(NAME_LEN*2+2),16,64)))
+ (my_init_dynamic_array(&tables4repair, sizeof(char)*(NAME_LEN*2+2),16,
+ 64, MYF(0)) ||
+ my_init_dynamic_array(&tables4rebuild, sizeof(char)*(NAME_LEN*2+2),16,
+ 64, MYF(0))))
goto end;
if (opt_alldbs)
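
Note: my_init_dynamic_array() grows the same trailing myf argument (the pattern repeats in mysqlbinlog.cc above and in mysqltest.cc, comp_err.c and resolve_stack_dump.c below). A minimal sketch of the assumed signature: element size, initial slots, growth increment, flags:

#include <my_global.h>
#include <my_sys.h>

int collect_one_int(void)
{
  DYNAMIC_ARRAY arr;
  int value= 42;
  /* 16 elements up front, grow by 64, default allocation flags */
  if (my_init_dynamic_array(&arr, sizeof(int), 16, 64, MYF(0)))
    return 1;                            /* out of memory */
  if (insert_dynamic(&arr, (uchar*) &value))
  {
    delete_dynamic(&arr);
    return 1;
  }
  delete_dynamic(&arr);                  /* frees the backing buffer */
  return 0;
}
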
diff --git a/client/mysqldump.c b/client/mysqldump.c
index c4dda927c68..a1789beae68 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -1914,9 +1914,7 @@ static void print_xml_row(FILE *xml_file, const char *row_name,
const char *str_create)
{
uint i;
-#ifndef DBUG_OFF
- my_bool body_found= 0;
-#endif
+ my_bool __attribute__((unused)) body_found= 0;
char *create_stmt_ptr= NULL;
ulong create_stmt_len= 0;
MYSQL_FIELD *field;
@@ -4585,7 +4583,7 @@ static int dump_selected_tables(char *db, char **table_names, int tables)
if (init_dumping(db, init_dumping_tables))
DBUG_RETURN(1);
- init_alloc_root(&root, 8192, 0);
+ init_alloc_root(&root, 8192, 0, MYF(0));
if (!(dump_tables= pos= (char**) alloc_root(&root, tables * sizeof(char *))))
die(EX_EOM, "alloc_root failure.");
@@ -4812,6 +4810,8 @@ static int do_show_slave_status(MYSQL *mysql_con)
MYSQL_RES *slave= 0;
const char *comment_prefix=
(opt_slave_data == MYSQL_OPT_SLAVE_DATA_COMMENTED_SQL) ? "-- " : "";
+ LINT_INIT(slave);
+
if (mysql_query_with_error_report(mysql_con, &slave, "SHOW SLAVE STATUS"))
{
if (!ignore_errors)
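
Note on the two hygiene tweaks above: __attribute__((unused)) replaces the #ifndef DBUG_OFF guard around a debug-only variable, and LINT_INIT() silences "may be used uninitialized" warnings. The latter ties into the -DFORCE_INIT_OF_VARS define that cmake/maintainer.cmake adds below; its assumed definition (reproduced from my_global.h from memory, so treat as a sketch):

#ifdef FORCE_INIT_OF_VARS
#define LINT_INIT(var) var= 0   /* maintainer builds: really initialize */
#else
#define LINT_INIT(var)          /* production builds: expands to nothing */
#endif
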
diff --git a/client/mysqltest.cc b/client/mysqltest.cc
index 8c6c1cb3408..2cca53f4e74 100644
--- a/client/mysqltest.cc
+++ b/client/mysqltest.cc
@@ -86,6 +86,8 @@ static my_bool non_blocking_api_enabled= 0;
#define QUERY_SEND_FLAG 1
#define QUERY_REAP_FLAG 2
+#define QUERY_PRINT_ORIGINAL_FLAG 4
+
#ifndef HAVE_SETENV
static int setenv(const char *name, const char *value, int overwrite);
#endif
@@ -255,6 +257,8 @@ static void init_re(void);
static int match_re(my_regex_t *, char *);
static void free_re(void);
+static char *get_string(char **to_ptr, char **from_ptr,
+ struct st_command *command);
static int replace(DYNAMIC_STRING *ds_str,
const char *search_str, ulong search_len,
const char *replace_str, ulong replace_len);
@@ -344,7 +348,8 @@ enum enum_commands {
Q_ERROR,
Q_SEND, Q_REAP,
Q_DIRTY_CLOSE, Q_REPLACE, Q_REPLACE_COLUMN,
- Q_PING, Q_EVAL,
+ Q_PING, Q_EVAL,
+ Q_EVALP,
Q_EVAL_RESULT,
Q_ENABLE_QUERY_LOG, Q_DISABLE_QUERY_LOG,
Q_ENABLE_RESULT_LOG, Q_DISABLE_RESULT_LOG,
@@ -410,6 +415,7 @@ const char *command_names[]=
"replace_column",
"ping",
"eval",
+ "evalp",
"eval_result",
/* Enable/disable that the _query_ is logged to result file */
"enable_query_log",
@@ -4587,7 +4593,8 @@ void do_wait_for_slave_to_stop(struct st_command *c __attribute__((unused)))
}
-void do_sync_with_master2(struct st_command *command, long offset)
+void do_sync_with_master2(struct st_command *command, long offset,
+ const char *connection_name)
{
MYSQL_RES *res;
MYSQL_ROW row;
@@ -4598,8 +4605,9 @@ void do_sync_with_master2(struct st_command *command, long offset)
if (!master_pos.file[0])
die("Calling 'sync_with_master' without calling 'save_master_pos'");
- sprintf(query_buf, "select master_pos_wait('%s', %ld, %d)",
- master_pos.file, master_pos.pos + offset, timeout);
+ sprintf(query_buf, "select master_pos_wait('%s', %ld, %d, '%s')",
+ master_pos.file, master_pos.pos + offset, timeout,
+ connection_name);
if (mysql_query(mysql, query_buf))
die("failed in '%s': %d: %s", query_buf, mysql_errno(mysql),
@@ -4659,16 +4667,32 @@ void do_sync_with_master(struct st_command *command)
long offset= 0;
char *p= command->first_argument;
const char *offset_start= p;
+ char *start, *buff= 0;
+ start= (char*) "";
+
if (*offset_start)
{
for (; my_isdigit(charset_info, *p); p++)
offset = offset * 10 + *p - '0';
- if(*p && !my_isspace(charset_info, *p))
+ if (*p && !my_isspace(charset_info, *p) && *p != ',')
die("Invalid integer argument \"%s\"", offset_start);
+
+ while (*p && my_isspace(charset_info, *p))
+ p++;
+ if (*p == ',')
+ {
+ p++;
+ while (*p && my_isspace(charset_info, *p))
+ p++;
+ start= buff= (char*)my_malloc(strlen(p)+1,MYF(MY_WME | MY_FAE));
+ get_string(&buff, &p, command);
+ }
command->last_argument= p;
}
- do_sync_with_master2(command, offset);
+ do_sync_with_master2(command, offset, start);
+ if (buff)
+ my_free(start);
return;
}
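
Note: with the optional ",'name'" argument, sync_with_master can follow one master connection out of several (multi-source replication); the name is simply forwarded as a fourth argument to master_pos_wait(), and an empty string keeps the old single-master behaviour. A standalone sketch with hypothetical values mirroring the sprintf() above:

#include <stdio.h>

int main(void)
{
  char query_buf[512];
  /* file, position, timeout and connection name are hypothetical examples */
  snprintf(query_buf, sizeof(query_buf),
           "select master_pos_wait('%s', %ld, %d, '%s')",
           "master-bin.000001", 4567L, 300, "slave1");
  puts(query_buf);
  /* -> select master_pos_wait('master-bin.000001', 4567, 300, 'slave1') */
  return 0;
}
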
@@ -5155,7 +5179,7 @@ typedef struct
static st_error global_error_names[] =
{
-  { "<No error>", -1U, "" },
+ { "<No error>", ~0U, "" },
#include <mysqld_ername.h>
{ 0, 0, 0 }
};
@@ -5293,7 +5317,7 @@ void do_get_errcodes(struct st_command *command)
{
die("The sqlstate definition must start with an uppercase S");
}
- else if (*p == 'E')
+ else if (*p == 'E' || *p == 'W')
{
/* Error name string */
@@ -5302,9 +5326,9 @@ void do_get_errcodes(struct st_command *command)
to->type= ERR_ERRNO;
DBUG_PRINT("info", ("ERR_ERRNO: %d", to->code.errnum));
}
- else if (*p == 'e')
+ else if (*p == 'e' || *p == 'w')
{
- die("The error name definition must start with an uppercase E");
+ die("The error name definition must start with an uppercase E or W");
}
else
{
@@ -5367,8 +5391,8 @@ void do_get_errcodes(struct st_command *command)
If string is a '$variable', return the value of the variable.
*/
-char *get_string(char **to_ptr, char **from_ptr,
- struct st_command *command)
+static char *get_string(char **to_ptr, char **from_ptr,
+ struct st_command *command)
{
char c, sep;
char *to= *to_ptr, *from= *from_ptr, *start=to;
@@ -7267,7 +7291,7 @@ void init_win_path_patterns()
DBUG_ENTER("init_win_path_patterns");
- my_init_dynamic_array(&patterns, sizeof(const char*), 16, 16);
+ my_init_dynamic_array(&patterns, sizeof(const char*), 16, 16, MYF(0));
/* Loop through all paths in the array */
for (i= 0; i < num_paths; i++)
@@ -8302,7 +8326,8 @@ void run_query(struct st_connection *cn, struct st_command *command, int flags)
/*
Evaluate query if this is an eval command
*/
- if (command->type == Q_EVAL || command->type == Q_SEND_EVAL)
+ if (command->type == Q_EVAL || command->type == Q_SEND_EVAL ||
+ command->type == Q_EVALP)
{
init_dynamic_string(&eval_query, "", command->query_len+256, 1024);
do_eval(&eval_query, command->query, command->end, FALSE);
@@ -8334,10 +8359,20 @@ void run_query(struct st_connection *cn, struct st_command *command, int flags)
*/
if (!disable_query_log && (flags & QUERY_SEND_FLAG))
{
- replace_dynstr_append_mem(ds, query, query_len);
+ char *print_query= query;
+ int print_len= query_len;
+ if (flags & QUERY_PRINT_ORIGINAL_FLAG)
+ {
+ print_query= command->query;
+ print_len= command->end - command->query;
+ }
+ replace_dynstr_append_mem(ds, print_query, print_len);
dynstr_append_mem(ds, delimiter, delimiter_length);
dynstr_append_mem(ds, "\n", 1);
}
+
+ /* We're done with this flag */
+ flags &= ~QUERY_PRINT_ORIGINAL_FLAG;
/*
Write the command to the result file before we execute the query
@@ -8869,7 +8904,7 @@ int main(int argc, char **argv)
cur_block->ok= TRUE; /* Outer block should always be executed */
cur_block->cmd= cmd_none;
- my_init_dynamic_array(&q_lines, sizeof(struct st_command*), 1024, 1024);
+ my_init_dynamic_array(&q_lines, sizeof(struct st_command*), 1024, 1024, MYF(0));
if (my_hash_init2(&var_hash, 64, charset_info,
128, 0, 0, get_var_key, var_free, MYF(0)))
@@ -8899,7 +8934,7 @@ int main(int argc, char **argv)
#endif
init_dynamic_string(&ds_res, "", 2048, 2048);
- init_alloc_root(&require_file_root, 1024, 1024);
+ init_alloc_root(&require_file_root, 1024, 1024, MYF(0));
parse_args(argc, argv);
@@ -9198,6 +9233,7 @@ int main(int argc, char **argv)
case Q_EVAL_RESULT:
die("'eval_result' command is deprecated");
case Q_EVAL:
+ case Q_EVALP:
case Q_QUERY_VERTICAL:
case Q_QUERY_HORIZONTAL:
if (command->query == command->query_buf)
@@ -9225,6 +9261,9 @@ int main(int argc, char **argv)
flags= QUERY_REAP_FLAG;
}
+ if (command->type == Q_EVALP)
+ flags |= QUERY_PRINT_ORIGINAL_FLAG;
+
/* Check for special property for this query */
display_result_vertically|= (command->type == Q_QUERY_VERTICAL);
@@ -9294,7 +9333,7 @@ int main(int argc, char **argv)
select_connection(command);
else
select_connection_name("slave");
- do_sync_with_master2(command, 0);
+ do_sync_with_master2(command, 0, "");
break;
}
case Q_COMMENT:
@@ -9839,7 +9878,7 @@ struct st_replace_regex* init_replace_regex(char* expr)
/* my_malloc() will die on fail with MY_FAE */
res=(struct st_replace_regex*)my_malloc(
sizeof(*res)+expr_len ,MYF(MY_FAE+MY_WME));
- my_init_dynamic_array(&res->regex_arr,sizeof(struct st_regex),128,128);
+ my_init_dynamic_array(&res->regex_arr,sizeof(struct st_regex), 128, 128, MYF(0));
buf= (char*)res + sizeof(*res);
expr_end= expr + expr_len;
@@ -10894,7 +10933,7 @@ void dynstr_append_sorted(DYNAMIC_STRING* ds, DYNAMIC_STRING *ds_input,
if (!*start)
DBUG_VOID_RETURN; /* No input */
- my_init_dynamic_array(&lines, sizeof(const char*), 32, 32);
+ my_init_dynamic_array(&lines, sizeof(const char*), 32, 32, MYF(0));
if (keep_header)
{
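
Note: the new evalp command behaves like eval in that $variables are expanded before the query is sent, but QUERY_PRINT_ORIGINAL_FLAG makes run_query() log the unexpanded text, which presumably keeps .result files stable when a variable holds a value that changes between runs (a port number, say). A condensed standalone model of that logging decision:

#include <stdio.h>

#define QUERY_PRINT_ORIGINAL_FLAG 4   /* matches the new flag above */

/* 'original' is what the .test file contained; 'expanded' is what was
   actually sent to the server. */
static const char *text_to_log(int flags, const char *original,
                               const char *expanded)
{
  return (flags & QUERY_PRINT_ORIGINAL_FLAG) ? original : expanded;
}

int main(void)
{
  puts(text_to_log(QUERY_PRINT_ORIGINAL_FLAG,
                   "select $MASTER_MYPORT;",   /* hypothetical evalp query */
                   "select 16000;"));
  return 0;
}
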
diff --git a/cmake/build_configurations/mysql_release.cmake b/cmake/build_configurations/mysql_release.cmake
index 5b2596491ad..a8097f03824 100644
--- a/cmake/build_configurations/mysql_release.cmake
+++ b/cmake/build_configurations/mysql_release.cmake
@@ -46,6 +46,8 @@ SET(FEATURE_SET_large 5)
SET(FEATURE_SET_xlarge 6)
SET(FEATURE_SET_community 7)
+#SET(WITH_CASSANDRA_STORAGE_ENGINE ON)
+
IF(FEATURE_SET)
STRING(TOLOWER ${FEATURE_SET} feature_set)
SET(num ${FEATURE_SET_${feature_set}})
diff --git a/cmake/cpack_rpm.cmake b/cmake/cpack_rpm.cmake
index 056366e6ee6..40e71e68e5b 100644
--- a/cmake/cpack_rpm.cmake
+++ b/cmake/cpack_rpm.cmake
@@ -20,12 +20,13 @@ SET(CPACK_COMPONENT_CLIENT_GROUP "client")
SET(CPACK_COMPONENT_MANPAGESCLIENT_GROUP "client")
SET(CPACK_COMPONENT_README_GROUP "server")
SET(CPACK_COMPONENT_SHAREDLIBRARIES_GROUP "shared")
+SET(CPACK_COMPONENT_CASSANDRASELIBRARIES_GROUP "CassandraSE")
SET(CPACK_COMPONENT_COMMON_GROUP "common")
SET(CPACK_COMPONENT_COMPAT_GROUP "compat")
SET(CPACK_COMPONENTS_ALL Server ManPagesServer IniFiles Server_Scripts
SupportFiles Development ManPagesDevelopment
ManPagesTest Readme ManPagesClient Test
- Common Client SharedLibraries)
+ Common Client SharedLibraries CassandraSE)
SET(CPACK_RPM_PACKAGE_NAME "MariaDB")
SET(CPACK_PACKAGE_FILE_NAME "${CPACK_RPM_PACKAGE_NAME}-${VERSION}-${RPM}-${CMAKE_SYSTEM_PROCESSOR}")
@@ -63,6 +64,7 @@ SET(CPACK_RPM_SPEC_MORE_DEFINE "${CPACK_RPM_SPEC_MORE_DEFINE}
")
SET(CPACK_RPM_PACKAGE_REQUIRES "MariaDB-common")
+SET(CPACK_RPM_CassandraSE_PACKAGE_REQUIRES "MariaDB-server")
SET(ignored
"%ignore /etc"
diff --git a/cmake/maintainer.cmake b/cmake/maintainer.cmake
index 9c9ab8cca3b..d52405e8bcc 100644
--- a/cmake/maintainer.cmake
+++ b/cmake/maintainer.cmake
@@ -18,7 +18,7 @@ INCLUDE(CheckCCompilerFlag)
# Setup GCC (GNU C compiler) warning options.
MACRO(SET_MYSQL_MAINTAINER_GNU_C_OPTIONS)
SET(MY_MAINTAINER_WARNINGS
- "-Wall -Wextra -Wunused -Wwrite-strings -Wno-strict-aliasing")
+ "-Wall -Wextra -Wunused -Wwrite-strings -Wno-strict-aliasing -Wno-invalid-offsetof -DFORCE_INIT_OF_VARS")
CHECK_C_COMPILER_FLAG("-Wno-missing-field-initializers"
HAVE_NO_MISSING_FIELD_INITIALIZERS)
diff --git a/cmake/mysql_version.cmake b/cmake/mysql_version.cmake
index 021e7ab4ae8..9dd2dfed1e6 100644
--- a/cmake/mysql_version.cmake
+++ b/cmake/mysql_version.cmake
@@ -49,7 +49,9 @@ MACRO(GET_MYSQL_VERSION)
MYSQL_GET_CONFIG_VALUE("MYSQL_VERSION_PATCH" PATCH_VERSION)
MYSQL_GET_CONFIG_VALUE("MYSQL_VERSION_EXTRA" EXTRA_VERSION)
- IF(NOT MAJOR_VERSION OR NOT MINOR_VERSION OR NOT PATCH_VERSION)
+IF(NOT "${MAJOR_VERSION}" MATCHES "[0-9]+" OR
+ NOT "${MINOR_VERSION}" MATCHES "[0-9]+" OR
+ NOT "${PATCH_VERSION}" MATCHES "[0-9]+")
MESSAGE(FATAL_ERROR "VERSION file cannot be parsed.")
ENDIF()
diff --git a/config.h.cmake b/config.h.cmake
index e6ec8b23b58..fa1740783ec 100644
--- a/config.h.cmake
+++ b/config.h.cmake
@@ -41,7 +41,6 @@
#cmakedefine HAVE_FNMATCH_H 1
#cmakedefine HAVE_FPU_CONTROL_H 1
#cmakedefine HAVE_GRP_H 1
-#cmakedefine HAVE_EXPLICIT_TEMPLATE_INSTANTIATION 1
#cmakedefine HAVE_IA64INTRIN_H 1
#cmakedefine HAVE_IEEEFP_H 1
#cmakedefine HAVE_INTTYPES_H 1
diff --git a/configure.cmake b/configure.cmake
index 05fa80e852f..bb9b4f9e7e8 100644
--- a/configure.cmake
+++ b/configure.cmake
@@ -55,26 +55,18 @@ ENDIF()
# Always enable -Wall for gnu C/C++
-IF(CMAKE_COMPILER_IS_GNUCXX)
- SET(CMAKE_CXX_FLAGS "-Wall ${CMAKE_CXX_FLAGS} -Wno-unused-parameter")
+IF(CMAKE_COMPILER_IS_GNUCXX AND NOT CMAKE_CXX_FLAGS MATCHES ".*-Wall.*")
+  SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wno-unused-parameter")
ENDIF()
-IF(CMAKE_COMPILER_IS_GNUCC)
- SET(CMAKE_C_FLAGS "-Wall ${CMAKE_C_FLAGS}")
+IF(CMAKE_COMPILER_IS_GNUCC AND NOT CMAKE_C_FLAGS MATCHES ".*-Wall.*")
+  SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall")
ENDIF()
IF(CMAKE_COMPILER_IS_GNUCXX)
# MySQL "canonical" GCC flags. At least -fno-rtti flag affects
# ABI and cannot be simply removed.
- SET(CMAKE_CXX_FLAGS
- "${CMAKE_CXX_FLAGS} -fno-implicit-templates -fno-exceptions -fno-rtti")
- IF(CMAKE_CXX_FLAGS)
- STRING(REGEX MATCH "fno-implicit-templates" NO_IMPLICIT_TEMPLATES
- ${CMAKE_CXX_FLAGS})
- IF (NO_IMPLICIT_TEMPLATES)
- SET(HAVE_EXPLICIT_TEMPLATE_INSTANTIATION TRUE)
- ENDIF()
- ENDIF()
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions -fno-rtti")
IF (CMAKE_EXE_LINKER_FLAGS MATCHES " -static "
OR CMAKE_EXE_LINKER_FLAGS MATCHES " -static$")
diff --git a/extra/comp_err.c b/extra/comp_err.c
index 1fababf3ae5..6d696bcb1d2 100644
--- a/extra/comp_err.c
+++ b/extra/comp_err.c
@@ -714,12 +714,12 @@ static ha_checksum checksum_format_specifier(const char* msg)
}
else if (start)
{
- switch(*p)
- {
+ switch(*p) {
case 'd':
case 'u':
case 'x':
case 's':
+ case 'M':
chksum= my_checksum(chksum, (uchar*) start, (uint) (p + 1 - start));
start= 0; /* Not in format specifier anymore */
break;
@@ -894,7 +894,7 @@ static struct errors *generate_empty_message(uint d_code)
if (!(new_error= (struct errors *) my_malloc(sizeof(*new_error),
MYF(MY_WME))))
return(0);
- if (my_init_dynamic_array(&new_error->msg, sizeof(struct message), 0, 1))
+ if (my_init_dynamic_array(&new_error->msg, sizeof(struct message), 0, 1, MYF(0)))
return(0); /* OOM: Fatal error */
new_error->er_name= NULL;
@@ -928,7 +928,7 @@ static struct errors *parse_error_string(char *str, int er_count)
MYF(MY_WME))))
DBUG_RETURN(0);
- if (my_init_dynamic_array(&new_error->msg, sizeof(struct message), 0, 0))
+ if (my_init_dynamic_array(&new_error->msg, sizeof(struct message), 0, 0, MYF(0)))
DBUG_RETURN(0); /* OOM: Fatal error */
/* getting the error name */
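
Note: checksum_format_specifier() exists so that translated error messages keep exactly the same printf directives as the English original; accepting 'M' here implies an extended specifier in my_vsnprintf() (that file changes later in this commit). Assuming MariaDB's '%M' extension consumes an int error number and appends its description (the exact rendering is an assumption), usage would look like:

#include <my_global.h>
#include <m_string.h>   /* my_snprintf() */
#include <stdio.h>

int main(void)
{
  char buf[128];
  /* expected output along the lines of: open failed: 13 "Permission denied" */
  my_snprintf(buf, sizeof(buf), "open failed: %M", 13);
  puts(buf);
  return 0;
}
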
diff --git a/extra/perror.c b/extra/perror.c
index de8a9b8a0c3..8aa6aa35b08 100644
--- a/extra/perror.c
+++ b/extra/perror.c
@@ -32,9 +32,8 @@
static my_bool verbose, print_all_codes;
-#include "../include/my_base.h"
-#include "../mysys/my_handler_errors.h"
-// #include "../include/my_compare.h"
+#include <my_base.h>
+#include <my_handler_errors.h>
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
static my_bool ndb_code;
diff --git a/extra/replace.c b/extra/replace.c
index 8aef9f9a0a0..c2dcc9f50b5 100644
--- a/extra/replace.c
+++ b/extra/replace.c
@@ -265,7 +265,7 @@ static int insert_pointer_name(reg1 POINTER_ARRAY *pa,char * name)
if (!(pa->str= (uchar*) my_malloc((uint) (PS_MALLOC-MALLOC_OVERHEAD),
MYF(MY_WME))))
{
- my_free(pa->typelib.type_names);
+ my_free((char*) pa->typelib.type_names);
DBUG_RETURN (-1);
}
pa->max_count=(PC_MALLOC-MALLOC_OVERHEAD)/(sizeof(uchar*)+
@@ -327,7 +327,7 @@ static void free_pointer_array(reg1 POINTER_ARRAY *pa)
if (pa->typelib.count)
{
pa->typelib.count=0;
- my_free(pa->typelib.type_names);
+ my_free((char*) pa->typelib.type_names);
pa->typelib.type_names=0;
my_free(pa->str);
}
diff --git a/extra/resolve_stack_dump.c b/extra/resolve_stack_dump.c
index dd77e1785d0..576710e0bde 100644
--- a/extra/resolve_stack_dump.c
+++ b/extra/resolve_stack_dump.c
@@ -223,7 +223,7 @@ static void init_sym_table()
{
char buf[512];
if (my_init_dynamic_array(&sym_table, sizeof(SYM_ENTRY), INIT_SYM_TABLE,
- INC_SYM_TABLE))
+ INC_SYM_TABLE, MYF(0)))
die("Failed in my_init_dynamic_array() -- looks like out of memory problem");
while (fgets(buf, sizeof(buf), fp_sym))
diff --git a/extra/yassl/CMakeLists.txt b/extra/yassl/CMakeLists.txt
index 46e1abbc08e..5f2f1956803 100644
--- a/extra/yassl/CMakeLists.txt
+++ b/extra/yassl/CMakeLists.txt
@@ -30,10 +30,6 @@ SET(YASSL_SOURCES src/buffer.cpp src/cert_wrapper.cpp src/crypto_wrapper.cpp sr
src/log.cpp src/socket_wrapper.cpp src/ssl.cpp src/timer.cpp src/yassl_error.cpp
src/yassl_imp.cpp src/yassl_int.cpp)
-IF(HAVE_EXPLICIT_TEMPLATE_INSTANTIATION)
- SET(YASSL_SOURCES ${YASSL_SOURCES} src/template_instnt.cpp)
-ENDIF()
-
ADD_CONVENIENCE_LIBRARY(yassl ${YASSL_SOURCES})
RESTRICT_SYMBOL_EXPORTS(yassl)
diff --git a/extra/yassl/src/crypto_wrapper.cpp b/extra/yassl/src/crypto_wrapper.cpp
index afb492c83c5..42a43627760 100644
--- a/extra/yassl/src/crypto_wrapper.cpp
+++ b/extra/yassl/src/crypto_wrapper.cpp
@@ -993,25 +993,4 @@ x509* PemToDer(FILE* file, CertType type, EncryptedInfo* info)
} // namespace
-
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-namespace yaSSL {
-template void ysDelete<DiffieHellman::DHImpl>(DiffieHellman::DHImpl*);
-template void ysDelete<Integer::IntegerImpl>(Integer::IntegerImpl*);
-template void ysDelete<RSA::RSAImpl>(RSA::RSAImpl*);
-template void ysDelete<DSS::DSSImpl>(DSS::DSSImpl*);
-template void ysDelete<RandomPool::RandomImpl>(RandomPool::RandomImpl*);
-template void ysDelete<AES::AESImpl>(AES::AESImpl*);
-template void ysDelete<RC4::RC4Impl>(RC4::RC4Impl*);
-template void ysDelete<DES_EDE::DES_EDEImpl>(DES_EDE::DES_EDEImpl*);
-template void ysDelete<DES::DESImpl>(DES::DESImpl*);
-template void ysDelete<HMAC_RMD::HMAC_RMDImpl>(HMAC_RMD::HMAC_RMDImpl*);
-template void ysDelete<HMAC_SHA::HMAC_SHAImpl>(HMAC_SHA::HMAC_SHAImpl*);
-template void ysDelete<HMAC_MD5::HMAC_MD5Impl>(HMAC_MD5::HMAC_MD5Impl*);
-template void ysDelete<RMD::RMDImpl>(RMD::RMDImpl*);
-template void ysDelete<SHA::SHAImpl>(SHA::SHAImpl*);
-template void ysDelete<MD5::MD5Impl>(MD5::MD5Impl*);
-}
-#endif // HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-
#endif // !USE_CRYPTOPP_LIB
diff --git a/extra/yassl/src/template_instnt.cpp b/extra/yassl/src/template_instnt.cpp
deleted file mode 100644
index fe3a251b865..00000000000
--- a/extra/yassl/src/template_instnt.cpp
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- Copyright (C) 2000-2007 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING. If not, write to the
- Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
- MA 02110-1301 USA.
-*/
-
-
-/* Explicit template instantiation requests
- */
-
-
-#include "runtime.hpp"
-#include "handshake.hpp"
-#include "yassl_int.hpp"
-#include "crypto_wrapper.hpp"
-#include "hmac.hpp"
-#include "md5.hpp"
-#include "sha.hpp"
-#include "ripemd.hpp"
-#include "openssl/ssl.h"
-
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-
-namespace mySTL {
-template class list<unsigned char*>;
-template yaSSL::del_ptr_zero for_each(mySTL::list<unsigned char*>::iterator, mySTL::list<unsigned char*>::iterator, yaSSL::del_ptr_zero);
-template pair<int, yaSSL::Message* (*)()>* uninit_copy<mySTL::pair<int, yaSSL::Message* (*)()>*, mySTL::pair<int, yaSSL::Message* (*)()>*>(mySTL::pair<int, yaSSL::Message* (*)()>*, mySTL::pair<int, yaSSL::Message* (*)()>*, mySTL::pair<int, yaSSL::Message* (*)()>*);
-template pair<int, yaSSL::HandShakeBase* (*)()>* uninit_copy<mySTL::pair<int, yaSSL::HandShakeBase* (*)()>*, mySTL::pair<int, yaSSL::HandShakeBase* (*)()>*>(mySTL::pair<int, yaSSL::HandShakeBase* (*)()>*, mySTL::pair<int, yaSSL::HandShakeBase* (*)()>*, mySTL::pair<int, yaSSL::HandShakeBase* (*)()>*);
-template void destroy<mySTL::pair<int, yaSSL::Message* (*)()>*>(mySTL::pair<int, yaSSL::Message* (*)()>*, mySTL::pair<int, yaSSL::Message* (*)()>*);
-template void destroy<mySTL::pair<int, yaSSL::HandShakeBase* (*)()>*>(mySTL::pair<int, yaSSL::HandShakeBase* (*)()>*, mySTL::pair<int, yaSSL::HandShakeBase* (*)()>*);
-template pair<int, yaSSL::ServerKeyBase* (*)()>* uninit_copy<mySTL::pair<int, yaSSL::ServerKeyBase* (*)()>*, mySTL::pair<int, yaSSL::ServerKeyBase* (*)()>*>(mySTL::pair<int, yaSSL::ServerKeyBase* (*)()>*, mySTL::pair<int, yaSSL::ServerKeyBase* (*)()>*, mySTL::pair<int, yaSSL::ServerKeyBase* (*)()>*);
-template void destroy<mySTL::pair<int, yaSSL::ServerKeyBase* (*)()>*>(mySTL::pair<int, yaSSL::ServerKeyBase* (*)()>*, mySTL::pair<int, yaSSL::ServerKeyBase* (*)()>*);
-template pair<int, yaSSL::ClientKeyBase* (*)()>* uninit_copy<mySTL::pair<int, yaSSL::ClientKeyBase* (*)()>*, mySTL::pair<int, yaSSL::ClientKeyBase* (*)()>*>(mySTL::pair<int, yaSSL::ClientKeyBase* (*)()>*, mySTL::pair<int, yaSSL::ClientKeyBase* (*)()>*, mySTL::pair<int, yaSSL::ClientKeyBase* (*)()>*);
-template class list<TaoCrypt::Signer*>;
-template class list<yaSSL::SSL_SESSION*>;
-template class list<yaSSL::input_buffer*>;
-template class list<yaSSL::output_buffer*>;
-template class list<yaSSL::x509*>;
-template class list<yaSSL::Digest*>;
-template class list<yaSSL::BulkCipher*>;
-template void destroy<mySTL::pair<int, yaSSL::ClientKeyBase* (*)()>*>(mySTL::pair<int, yaSSL::ClientKeyBase* (*)()>*, mySTL::pair<int, yaSSL::ClientKeyBase* (*)()>*);
-template yaSSL::del_ptr_zero for_each<mySTL::list<TaoCrypt::Signer*>::iterator, yaSSL::del_ptr_zero>(mySTL::list<TaoCrypt::Signer*>::iterator, mySTL::list<TaoCrypt::Signer*>::iterator, yaSSL::del_ptr_zero);
-template yaSSL::del_ptr_zero for_each<mySTL::list<yaSSL::SSL_SESSION*>::iterator, yaSSL::del_ptr_zero>(mySTL::list<yaSSL::SSL_SESSION*>::iterator, mySTL::list<yaSSL::SSL_SESSION*>::iterator, yaSSL::del_ptr_zero);
-template yaSSL::del_ptr_zero for_each<mySTL::list<yaSSL::input_buffer*>::iterator, yaSSL::del_ptr_zero>(mySTL::list<yaSSL::input_buffer*>::iterator, mySTL::list<yaSSL::input_buffer*>::iterator, yaSSL::del_ptr_zero);
-template yaSSL::del_ptr_zero for_each<mySTL::list<yaSSL::output_buffer*>::iterator, yaSSL::del_ptr_zero>(mySTL::list<yaSSL::output_buffer*>::iterator, mySTL::list<yaSSL::output_buffer*>::iterator, yaSSL::del_ptr_zero);
-template yaSSL::del_ptr_zero for_each<mySTL::list<yaSSL::x509*>::iterator, yaSSL::del_ptr_zero>(mySTL::list<yaSSL::x509*>::iterator, mySTL::list<yaSSL::x509*>::iterator, yaSSL::del_ptr_zero);
-template yaSSL::del_ptr_zero for_each<mySTL::list<yaSSL::Digest*>::iterator, yaSSL::del_ptr_zero>(mySTL::list<yaSSL::Digest*>::iterator, mySTL::list<yaSSL::Digest*>::iterator, yaSSL::del_ptr_zero);
-template yaSSL::del_ptr_zero for_each<mySTL::list<yaSSL::BulkCipher*>::iterator, yaSSL::del_ptr_zero>(mySTL::list<yaSSL::BulkCipher*>::iterator, mySTL::list<yaSSL::BulkCipher*>::iterator, yaSSL::del_ptr_zero);
-template bool list<yaSSL::ThreadError>::erase(list<yaSSL::ThreadError>::iterator);
-template void list<yaSSL::ThreadError>::push_back(yaSSL::ThreadError);
-template void list<yaSSL::ThreadError>::pop_front();
-template void list<yaSSL::ThreadError>::pop_back();
-template list<yaSSL::ThreadError>::~list();
-template pair<int, yaSSL::Message* (*)()>* GetArrayMemory<pair<int, yaSSL::Message* (*)()> >(size_t);
-template void FreeArrayMemory<pair<int, yaSSL::Message* (*)()> >(pair<int, yaSSL::Message* (*)()>*);
-template pair<int, yaSSL::HandShakeBase* (*)()>* GetArrayMemory<pair<int, yaSSL::HandShakeBase* (*)()> >(size_t);
-template void FreeArrayMemory<pair<int, yaSSL::HandShakeBase* (*)()> >(pair<int, yaSSL::HandShakeBase* (*)()>*);
-template pair<int, yaSSL::ServerKeyBase* (*)()>* GetArrayMemory<pair<int, yaSSL::ServerKeyBase* (*)()> >(size_t);
-template void FreeArrayMemory<pair<int, yaSSL::ServerKeyBase* (*)()> >(pair<int, yaSSL::ServerKeyBase* (*)()>*);
-template pair<int, yaSSL::ClientKeyBase* (*)()>* GetArrayMemory<pair<int, yaSSL::ClientKeyBase* (*)()> >(size_t);
-template void FreeArrayMemory<pair<int, yaSSL::ClientKeyBase* (*)()> >(pair<int, yaSSL::ClientKeyBase* (*)()>*);
-}
-
-namespace yaSSL {
-template void ysDelete<SSL_CTX>(yaSSL::SSL_CTX*);
-template void ysDelete<SSL>(yaSSL::SSL*);
-template void ysDelete<BIGNUM>(yaSSL::BIGNUM*);
-template void ysDelete<unsigned char>(unsigned char*);
-template void ysDelete<DH>(yaSSL::DH*);
-template void ysDelete<TaoCrypt::Signer>(TaoCrypt::Signer*);
-template void ysDelete<SSL_SESSION>(yaSSL::SSL_SESSION*);
-template void ysDelete<input_buffer>(input_buffer*);
-template void ysDelete<output_buffer>(output_buffer*);
-template void ysDelete<x509>(x509*);
-template void ysDelete<Auth>(Auth*);
-template void ysDelete<HandShakeBase>(HandShakeBase*);
-template void ysDelete<ServerKeyBase>(ServerKeyBase*);
-template void ysDelete<ClientKeyBase>(ClientKeyBase*);
-template void ysDelete<SSL_METHOD>(SSL_METHOD*);
-template void ysDelete<DiffieHellman>(DiffieHellman*);
-template void ysDelete<BulkCipher>(BulkCipher*);
-template void ysDelete<Digest>(Digest*);
-template void ysDelete<X509>(X509*);
-template void ysDelete<Message>(Message*);
-template void ysDelete<sslFactory>(sslFactory*);
-template void ysDelete<Sessions>(Sessions*);
-template void ysDelete<Errors>(Errors*);
-template void ysArrayDelete<unsigned char>(unsigned char*);
-template void ysArrayDelete<char>(char*);
-
-template int min<int>(int, int);
-template uint16 min<uint16>(uint16, uint16);
-template unsigned int min<unsigned int>(unsigned int, unsigned int);
-template unsigned long min<unsigned long>(unsigned long, unsigned long);
-}
-
-#endif // HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-
diff --git a/extra/yassl/src/yassl_int.cpp b/extra/yassl/src/yassl_int.cpp
index 65e17b01544..25c026b8654 100644
--- a/extra/yassl/src/yassl_int.cpp
+++ b/extra/yassl/src/yassl_int.cpp
@@ -2600,13 +2600,3 @@ extern "C" void yaSSL_CleanUp()
yaSSL::errorsInstance = 0;
}
-
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-namespace mySTL {
-template yaSSL::yassl_int_cpp_local1::SumData for_each<mySTL::list<yaSSL::input_buffer*>::iterator, yaSSL::yassl_int_cpp_local1::SumData>(mySTL::list<yaSSL::input_buffer*>::iterator, mySTL::list<yaSSL::input_buffer*>::iterator, yaSSL::yassl_int_cpp_local1::SumData);
-template yaSSL::yassl_int_cpp_local1::SumBuffer for_each<mySTL::list<yaSSL::output_buffer*>::iterator, yaSSL::yassl_int_cpp_local1::SumBuffer>(mySTL::list<yaSSL::output_buffer*>::iterator, mySTL::list<yaSSL::output_buffer*>::iterator, yaSSL::yassl_int_cpp_local1::SumBuffer);
-template mySTL::list<yaSSL::SSL_SESSION*>::iterator find_if<mySTL::list<yaSSL::SSL_SESSION*>::iterator, yaSSL::yassl_int_cpp_local2::sess_match>(mySTL::list<yaSSL::SSL_SESSION*>::iterator, mySTL::list<yaSSL::SSL_SESSION*>::iterator, yaSSL::yassl_int_cpp_local2::sess_match);
-template mySTL::list<yaSSL::ThreadError>::iterator find_if<mySTL::list<yaSSL::ThreadError>::iterator, yaSSL::yassl_int_cpp_local2::thr_match>(mySTL::list<yaSSL::ThreadError>::iterator, mySTL::list<yaSSL::ThreadError>::iterator, yaSSL::yassl_int_cpp_local2::thr_match);
-}
-#endif
-
diff --git a/extra/yassl/taocrypt/CMakeLists.txt b/extra/yassl/taocrypt/CMakeLists.txt
index 10ed614445e..b75d478037e 100644
--- a/extra/yassl/taocrypt/CMakeLists.txt
+++ b/extra/yassl/taocrypt/CMakeLists.txt
@@ -29,10 +29,6 @@ SET(TAOCRYPT_SOURCES src/aes.cpp src/aestables.cpp src/algebra.cpp src/arc4.cpp
include/random.hpp include/ripemd.hpp include/rsa.hpp include/sha.hpp
include/rabbit.hpp include/hc128.hpp)
-IF(HAVE_EXPLICIT_TEMPLATE_INSTANTIATION)
- SET(TAOCRYPT_SOURCES ${TAOCRYPT_SOURCES} src/template_instnt.cpp)
-ENDIF()
-
ADD_CONVENIENCE_LIBRARY(taocrypt ${TAOCRYPT_SOURCES})
RESTRICT_SYMBOL_EXPORTS(taocrypt)
diff --git a/extra/yassl/taocrypt/src/algebra.cpp b/extra/yassl/taocrypt/src/algebra.cpp
index 29754b27b5e..b2bebfe8b94 100644
--- a/extra/yassl/taocrypt/src/algebra.cpp
+++ b/extra/yassl/taocrypt/src/algebra.cpp
@@ -325,13 +325,3 @@ void AbstractRing::SimultaneousExponentiate(Integer *results,
} // namespace
-
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-namespace mySTL {
-template TaoCrypt::WindowSlider* uninit_copy<TaoCrypt::WindowSlider*, TaoCrypt::WindowSlider*>(TaoCrypt::WindowSlider*, TaoCrypt::WindowSlider*, TaoCrypt::WindowSlider*);
-template void destroy<TaoCrypt::WindowSlider*>(TaoCrypt::WindowSlider*, TaoCrypt::WindowSlider*);
-template TaoCrypt::WindowSlider* GetArrayMemory<TaoCrypt::WindowSlider>(size_t);
-template void FreeArrayMemory<TaoCrypt::WindowSlider>(TaoCrypt::WindowSlider*);
-}
-#endif
-
diff --git a/extra/yassl/taocrypt/src/integer.cpp b/extra/yassl/taocrypt/src/integer.cpp
index 15deb59d4f3..8dccf1a1340 100644
--- a/extra/yassl/taocrypt/src/integer.cpp
+++ b/extra/yassl/taocrypt/src/integer.cpp
@@ -3890,17 +3890,5 @@ Integer CRT(const Integer &xp, const Integer &p, const Integer &xq,
return p * (u * (xq-xp) % q) + xp;
}
-
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-#ifndef TAOCRYPT_NATIVE_DWORD_AVAILABLE
-template hword DivideThreeWordsByTwo<hword, Word>(hword*, hword, hword, Word*);
-#endif
-template word DivideThreeWordsByTwo<word, DWord>(word*, word, word, DWord*);
-#ifdef SSE2_INTRINSICS_AVAILABLE
-template class AlignedAllocator<word>;
-#endif
-#endif
-
-
} // namespace
diff --git a/extra/yassl/taocrypt/src/template_instnt.cpp b/extra/yassl/taocrypt/src/template_instnt.cpp
deleted file mode 100644
index b472d18236f..00000000000
--- a/extra/yassl/taocrypt/src/template_instnt.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- Copyright (C) 2000-2007 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; see the file COPYING. If not, write to the
- Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
- MA 02110-1301 USA.
-*/
-
-
-/* Explicit template instantiation requests
- */
-
-
-#include "runtime.hpp"
-#include "integer.hpp"
-#include "rsa.hpp"
-#include "sha.hpp"
-#include "md5.hpp"
-#include "hmac.hpp"
-#include "ripemd.hpp"
-#include "pwdbased.hpp"
-#include "algebra.hpp"
-#include "vector.hpp"
-#include "hash.hpp"
-
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-namespace TaoCrypt {
-
-#if defined(SSE2_INTRINSICS_AVAILABLE)
-template AlignedAllocator<unsigned int>::pointer StdReallocate<unsigned int, AlignedAllocator<unsigned int> >(AlignedAllocator<unsigned int>&, unsigned int*, AlignedAllocator<unsigned int>::size_type, AlignedAllocator<unsigned int>::size_type, bool);
-#endif
-
-template class RSA_Decryptor<RSA_BlockType2>;
-template class RSA_Encryptor<RSA_BlockType1>;
-template class RSA_Encryptor<RSA_BlockType2>;
-template void tcDelete<HASH>(HASH*);
-template void tcDelete<Integer>(Integer*);
-template void tcArrayDelete<byte>(byte*);
-template AllocatorWithCleanup<byte>::pointer StdReallocate<byte, AllocatorWithCleanup<byte> >(AllocatorWithCleanup<byte>&, byte*, AllocatorWithCleanup<byte>::size_type, AllocatorWithCleanup<byte>::size_type, bool);
-template void tcArrayDelete<word>(word*);
-template AllocatorWithCleanup<word>::pointer StdReallocate<word, AllocatorWithCleanup<word> >(AllocatorWithCleanup<word>&, word*, AllocatorWithCleanup<word>::size_type, AllocatorWithCleanup<word>::size_type, bool);
-
-#ifndef TAOCRYPT_SLOW_WORD64 // defined when word != word32
-template void tcArrayDelete<word32>(word32*);
-template AllocatorWithCleanup<word32>::pointer StdReallocate<word32, AllocatorWithCleanup<word32> >(AllocatorWithCleanup<word32>&, word32*, AllocatorWithCleanup<word32>::size_type, AllocatorWithCleanup<word32>::size_type, bool);
-#endif
-
-template void tcArrayDelete<char>(char*);
-
-template class PBKDF2_HMAC<SHA>;
-template class HMAC<MD5>;
-template class HMAC<SHA>;
-template class HMAC<RIPEMD160>;
-}
-
-namespace mySTL {
-template vector<TaoCrypt::Integer>* uninit_fill_n<vector<TaoCrypt::Integer>*, size_t, vector<TaoCrypt::Integer> >(vector<TaoCrypt::Integer>*, size_t, vector<TaoCrypt::Integer> const&);
-template void destroy<vector<TaoCrypt::Integer>*>(vector<TaoCrypt::Integer>*, vector<TaoCrypt::Integer>*);
-template TaoCrypt::Integer* uninit_copy<TaoCrypt::Integer*, TaoCrypt::Integer*>(TaoCrypt::Integer*, TaoCrypt::Integer*, TaoCrypt::Integer*);
-template TaoCrypt::Integer* uninit_fill_n<TaoCrypt::Integer*, size_t, TaoCrypt::Integer>(TaoCrypt::Integer*, size_t, TaoCrypt::Integer const&);
-template void destroy<TaoCrypt::Integer*>(TaoCrypt::Integer*, TaoCrypt::Integer*);
-template TaoCrypt::byte* GetArrayMemory<TaoCrypt::byte>(size_t);
-template void FreeArrayMemory<TaoCrypt::byte>(TaoCrypt::byte*);
-template TaoCrypt::Integer* GetArrayMemory<TaoCrypt::Integer>(size_t);
-template void FreeArrayMemory<TaoCrypt::Integer>(TaoCrypt::Integer*);
-template vector<TaoCrypt::Integer>* GetArrayMemory<vector<TaoCrypt::Integer> >(size_t);
-template void FreeArrayMemory<vector<TaoCrypt::Integer> >(vector<TaoCrypt::Integer>*);
-template void FreeArrayMemory<void>(void*);
-}
-
-#endif
diff --git a/include/hash.h b/include/hash.h
index 98bcf911db2..2ecfe0891d7 100644
--- a/include/hash.h
+++ b/include/hash.h
@@ -39,6 +39,7 @@ extern "C" {
/* flags for hash_init */
#define HASH_UNIQUE 1 /* hash_insert fails on duplicate key */
+#define HASH_THREAD_SPECIFIC 2 /* Mark allocated memory THREAD_SPECIFIC */
typedef uint my_hash_value_type;
typedef uchar *(*my_hash_get_key)(const uchar *,size_t*,my_bool);
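
The new flag is meant to be OR-ed into the flags argument of my_hash_init()
so the hash's internal allocations are tagged per-thread. A hedged sketch,
assuming the my_hash_init() wrapper declared in this header (get_key_func is
a hypothetical key-extraction callback):

  HASH cache;
  if (my_hash_init(&cache, &my_charset_bin, 32, 0, 0,
                   (my_hash_get_key) get_key_func, my_free,
                   HASH_UNIQUE | HASH_THREAD_SPECIFIC))
    return 1;                               /* out of memory */
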
diff --git a/include/heap.h b/include/heap.h
index 10b165d89c6..149f9ed5f56 100644
--- a/include/heap.h
+++ b/include/heap.h
@@ -153,6 +153,7 @@ typedef struct st_heap_share
THR_LOCK lock;
mysql_mutex_t intern_lock; /* Locking for use with _locking */
my_bool delete_on_close;
+ my_bool internal; /* Internal temporary table */
LIST open_list;
uint auto_key;
uint auto_key_type; /* real type of the auto key segment */
diff --git a/include/ma_dyncol.h b/include/ma_dyncol.h
index 6174328d62a..ed5eb1e6bae 100644
--- a/include/ma_dyncol.h
+++ b/include/ma_dyncol.h
@@ -34,10 +34,10 @@
#include <mysql_time.h>
/*
- Max length for data in a dynamic colums. This comes from how the
- how the offset are stored.
+ Limits of implementation
*/
-#define MAX_DYNAMIC_COLUMN_LENGTH 0X1FFFFFFFL
+#define MAX_TOTAL_NAME_LENGTH 65535
+#define MAX_NAME_LENGTH (MAX_TOTAL_NAME_LENGTH/4)
/* NO and OK is the same used just to show semantics */
#define ER_DYNCOL_NO ER_DYNCOL_OK
@@ -50,7 +50,8 @@ enum enum_dyncol_func_result
ER_DYNCOL_LIMIT= -2, /* Some limit reached */
ER_DYNCOL_RESOURCE= -3, /* Out of resourses */
ER_DYNCOL_DATA= -4, /* Incorrect input data */
- ER_DYNCOL_UNKNOWN_CHARSET= -5 /* Unknown character set */
+ ER_DYNCOL_UNKNOWN_CHARSET= -5, /* Unknown character set */
+ ER_DYNCOL_TRUNCATED= 2 /* OK, but data was truncated */
};
typedef DYNAMIC_STRING DYNAMIC_COLUMN;
@@ -65,7 +66,8 @@ enum enum_dynamic_column_type
DYN_COL_DECIMAL,
DYN_COL_DATETIME,
DYN_COL_DATE,
- DYN_COL_TIME
+ DYN_COL_TIME,
+ DYN_COL_DYNCOL
};
typedef enum enum_dynamic_column_type DYNAMIC_COLUMN_TYPE;
@@ -79,7 +81,7 @@ struct st_dynamic_column_value
unsigned long long ulong_value;
double double_value;
struct {
- LEX_STRING value;
+ MYSQL_LEX_STRING value;
CHARSET_INFO *charset;
} string;
struct {
@@ -92,6 +94,7 @@ struct st_dynamic_column_value
typedef struct st_dynamic_column_value DYNAMIC_COLUMN_VALUE;
+#ifdef MADYNCOL_DEPRECATED
enum enum_dyncol_func_result
dynamic_column_create(DYNAMIC_COLUMN *str,
uint column_nr, DYNAMIC_COLUMN_VALUE *value);
@@ -101,7 +104,6 @@ dynamic_column_create_many(DYNAMIC_COLUMN *str,
uint column_count,
uint *column_numbers,
DYNAMIC_COLUMN_VALUE *values);
-
enum enum_dyncol_func_result
dynamic_column_update(DYNAMIC_COLUMN *org, uint column_nr,
DYNAMIC_COLUMN_VALUE *value);
@@ -112,36 +114,102 @@ dynamic_column_update_many(DYNAMIC_COLUMN *str,
DYNAMIC_COLUMN_VALUE *values);
enum enum_dyncol_func_result
-dynamic_column_delete(DYNAMIC_COLUMN *org, uint column_nr);
+dynamic_column_exists(DYNAMIC_COLUMN *org, uint column_nr);
enum enum_dyncol_func_result
-dynamic_column_exists(DYNAMIC_COLUMN *org, uint column_nr);
+dynamic_column_list(DYNAMIC_COLUMN *org, DYNAMIC_ARRAY *array_of_uint);
+
+enum enum_dyncol_func_result
+dynamic_column_get(DYNAMIC_COLUMN *org, uint column_nr,
+ DYNAMIC_COLUMN_VALUE *store_it_here);
+#endif
+
+/* new functions */
+enum enum_dyncol_func_result
+mariadb_dyncol_create_many(DYNAMIC_COLUMN *str,
+ uint column_count,
+ uint *column_numbers,
+ DYNAMIC_COLUMN_VALUE *values,
+ my_bool new_string);
+enum enum_dyncol_func_result
+mariadb_dyncol_create_many_named(DYNAMIC_COLUMN *str,
+ uint column_count,
+ MYSQL_LEX_STRING *column_keys,
+ DYNAMIC_COLUMN_VALUE *values,
+ my_bool new_string);
+
+
+enum enum_dyncol_func_result
+mariadb_dyncol_update_many(DYNAMIC_COLUMN *str,
+ uint add_column_count,
+ uint *column_keys,
+ DYNAMIC_COLUMN_VALUE *values);
+enum enum_dyncol_func_result
+mariadb_dyncol_update_many_named(DYNAMIC_COLUMN *str,
+ uint add_column_count,
+ MYSQL_LEX_STRING *column_keys,
+ DYNAMIC_COLUMN_VALUE *values);
+
+
+enum enum_dyncol_func_result
+mariadb_dyncol_exists(DYNAMIC_COLUMN *org, uint column_nr);
+enum enum_dyncol_func_result
+mariadb_dyncol_exists_named(DYNAMIC_COLUMN *str, MYSQL_LEX_STRING *name);
/* List of not NULL columns */
enum enum_dyncol_func_result
-dynamic_column_list(DYNAMIC_COLUMN *org, DYNAMIC_ARRAY *array_of_uint);
+mariadb_dyncol_list(DYNAMIC_COLUMN *str, uint *count, uint **nums);
+enum enum_dyncol_func_result
+mariadb_dyncol_list_named(DYNAMIC_COLUMN *str, uint *count,
+ MYSQL_LEX_STRING **names);
/*
if the column do not exists it is NULL
*/
enum enum_dyncol_func_result
-dynamic_column_get(DYNAMIC_COLUMN *org, uint column_nr,
+mariadb_dyncol_get(DYNAMIC_COLUMN *org, uint column_nr,
DYNAMIC_COLUMN_VALUE *store_it_here);
+enum enum_dyncol_func_result
+mariadb_dyncol_get_named(DYNAMIC_COLUMN *str, MYSQL_LEX_STRING *name,
+ DYNAMIC_COLUMN_VALUE *store_it_here);
+
+my_bool mariadb_dyncol_has_names(DYNAMIC_COLUMN *str);
+
+enum enum_dyncol_func_result
+mariadb_dyncol_check(DYNAMIC_COLUMN *str);
+
+enum enum_dyncol_func_result
+mariadb_dyncol_json(DYNAMIC_COLUMN *str, DYNAMIC_STRING *json);
#define dynamic_column_initialize(A) memset((A), 0, sizeof(*(A)))
#define dynamic_column_column_free(V) dynstr_free(V)
-/***************************************************************************
- Internal functions, don't use if you don't know what you are doing...
-***************************************************************************/
+/* conversion of values to 3 base types */
+enum enum_dyncol_func_result
+mariadb_dyncol_val_str(DYNAMIC_STRING *str, DYNAMIC_COLUMN_VALUE *val,
+ CHARSET_INFO *cs, my_bool quote);
+enum enum_dyncol_func_result
+mariadb_dyncol_val_long(longlong *ll, DYNAMIC_COLUMN_VALUE *val);
+enum enum_dyncol_func_result
+mariadb_dyncol_val_double(double *dbl, DYNAMIC_COLUMN_VALUE *val);
+
+
+enum enum_dyncol_func_result
+mariadb_dyncol_unpack(DYNAMIC_COLUMN *str,
+ uint *count,
+ MYSQL_LEX_STRING **names, DYNAMIC_COLUMN_VALUE **vals);
-#define dynamic_column_reassociate(V,P,L, A) dynstr_reassociate((V),(P),(L),(A))
+int mariadb_dyncol_column_cmp_named(const MYSQL_LEX_STRING *s1,
+ const MYSQL_LEX_STRING *s2);
+
+enum enum_dyncol_func_result
+mariadb_dyncol_column_count(DYNAMIC_COLUMN *str, uint *column_count);
-#define dynamic_column_value_init(V) (V)->type= DYN_COL_NULL
+#define mariadb_dyncol_value_init(V) (V)->type= DYN_COL_NULL
/*
Prepare value for using as decimal
*/
-void dynamic_column_prepare_decimal(DYNAMIC_COLUMN_VALUE *value);
+void mariadb_dyncol_prepare_decimal(DYNAMIC_COLUMN_VALUE *value);
#endif
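
The renamed mariadb_dyncol_* entry points keep the old calling pattern but
add named columns. A hedged sketch of creating and reading one named column
(the value union is assumed to be named 'x' as in the full header; error
handling elided):

  DYNAMIC_COLUMN col;
  DYNAMIC_COLUMN_VALUE val;
  MYSQL_LEX_STRING key= { (char*) "price", 5 };

  mariadb_dyncol_value_init(&val);          /* sets type= DYN_COL_NULL */
  val.type= DYN_COL_UINT;
  val.x.ulong_value= 42;                    /* assumption: union member 'x' */

  if (mariadb_dyncol_create_many_named(&col, 1, &key, &val, 1) == ER_DYNCOL_OK &&
      mariadb_dyncol_get_named(&col, &key, &val) == ER_DYNCOL_OK)
    ;                                       /* val now holds the stored value */
  dynamic_column_column_free(&col);
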
diff --git a/include/maria.h b/include/maria.h
index cb2f92e8a35..cab296246b6 100644
--- a/include/maria.h
+++ b/include/maria.h
@@ -383,7 +383,8 @@ int maria_recreate_table(HA_CHECK *param, MARIA_HA **org_info, char *filename);
int maria_disable_indexes(MARIA_HA *info);
int maria_enable_indexes(MARIA_HA *info);
int maria_indexes_are_disabled(MARIA_HA *info);
-void maria_disable_non_unique_index(MARIA_HA *info, ha_rows rows);
+void maria_disable_indexes_for_rebuild(MARIA_HA *info, ha_rows rows,
+ my_bool all_keys);
my_bool maria_test_if_sort_rep(MARIA_HA *info, ha_rows rows, ulonglong key_map,
my_bool force);
diff --git a/include/my_base.h b/include/my_base.h
index e072bb7e2b1..4cbcb00425b 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2000, 2012, Oracle and/or its affiliates.
+ Copyright (c) 1995, 2012 Monty Program Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -334,6 +335,12 @@ enum ha_base_keytype {
#define HA_CREATE_DELAY_KEY_WRITE 64
#define HA_CREATE_RELIES_ON_SQL_LAYER 128
+
+/* Flags used by start_bulk_insert */
+
+#define HA_CREATE_UNIQUE_INDEX_BY_SORT 1
+
+
/*
The following flags (OR-ed) are passed to handler::info() method.
The method copies misc handler information out of the storage engine
@@ -459,14 +466,16 @@ enum ha_base_keytype {
#define HA_ERR_FILE_TOO_SHORT 175 /* File too short */
#define HA_ERR_WRONG_CRC 176 /* Wrong CRC on page */
#define HA_ERR_TOO_MANY_CONCURRENT_TRXS 177 /*Too many active concurrent transactions */
-#define HA_ERR_INDEX_COL_TOO_LONG 178 /* Index column length exceeds limit */
-#define HA_ERR_INDEX_CORRUPT 179 /* Index corrupted */
-#define HA_ERR_UNDO_REC_TOO_BIG 180 /* Undo log record too big */
-#define HA_ERR_TABLE_IN_FK_CHECK 181 /* Table being used in foreign key check */
-#define HA_ERR_ROW_NOT_VISIBLE 182
-#define HA_ERR_ABORTED_BY_USER 183
-#define HA_ERR_DISK_FULL 184
-#define HA_ERR_LAST 184 /* Copy of last error nr */
+#define HA_ERR_NOT_IN_LOCK_PARTITIONS 178
+#define HA_ERR_INDEX_COL_TOO_LONG 179 /* Index column length exceeds limit */
+#define HA_ERR_INDEX_CORRUPT 180 /* Index corrupted */
+#define HA_ERR_UNDO_REC_TOO_BIG 181 /* Undo log record too big */
+#define HA_ERR_TABLE_IN_FK_CHECK 182 /* Table being used in foreign key check */
+#define HA_FTS_INVALID_DOCID 183 /* Invalid InnoDB Doc ID */
+#define HA_ERR_ROW_NOT_VISIBLE 184
+#define HA_ERR_ABORTED_BY_USER 185
+#define HA_ERR_DISK_FULL 186
+#define HA_ERR_LAST 186 /* Copy of last error nr */
/* Number of different errors */
#define HA_ERR_ERRORS (HA_ERR_LAST - HA_ERR_FIRST + 1)
diff --git a/include/my_dir.h b/include/my_dir.h
index 1ee002cc380..6a941b37d79 100644
--- a/include/my_dir.h
+++ b/include/my_dir.h
@@ -45,8 +45,9 @@ extern "C" {
#define MY_S_ISREG(m) (((m) & MY_S_IFMT) == MY_S_IFREG)
#define MY_S_ISFIFO(m) (((m) & MY_S_IFMT) == MY_S_IFIFO)
-#define MY_DONT_SORT 512 /* my_lib; Don't sort files */
-#define MY_WANT_STAT 1024 /* my_lib; stat files */
+/* Ensure these don't clash with anything in my_sys.h */
+#define MY_DONT_SORT 8192 /* my_lib; Don't sort files */
+#define MY_WANT_STAT 16384 /* my_lib; stat files */
/* typedefs for my_dir & my_stat */
diff --git a/include/my_global.h b/include/my_global.h
index 09a3a0a6efd..a99823b94d8 100644
--- a/include/my_global.h
+++ b/include/my_global.h
@@ -1003,7 +1003,7 @@ typedef struct st_mysql_lex_string LEX_STRING;
#define SOCKET_EMFILE EMFILE
#endif
-typedef int myf; /* Type of MyFlags in my_funcs */
+typedef ulong myf; /* Type of MyFlags in my_funcs */
typedef char my_bool; /* Small bool */
/* Macros for converting *constants* to the right type */
diff --git a/mysys/my_handler_errors.h b/include/my_handler_errors.h
index 8b3b359ea95..f2c51773e83 100644
--- a/mysys/my_handler_errors.h
+++ b/include/my_handler_errors.h
@@ -80,10 +80,12 @@ static const char *handler_error_messages[]=
"File too short; Expected more data in file",
"Read page with wrong checksum",
"Too many active concurrent transactions",
+ "Record not matching the given partition set",
"Index column length exceeds limit",
"Index corrupted",
"Undo record too big",
"Table is being used in foreign key check",
+ "Invalid InnoDB FTS Doc ID",
"Row is not visible by the current transaction",
"Operation was interrupted by end user (probably kill command?)",
"Disk full"
diff --git a/include/my_sys.h b/include/my_sys.h
index f0f5d7666fc..25f328b8a33 100644
--- a/include/my_sys.h
+++ b/include/my_sys.h
@@ -52,6 +52,7 @@ typedef struct my_aio_result {
area, and we would like to avoid unexpected truncation.
*/
#define MYSYS_ERRMSG_SIZE (512)
+#define MYSYS_STRERROR_SIZE (128)
#define MY_FILE_ERROR ((size_t) -1)
@@ -85,6 +86,10 @@ typedef struct my_aio_result {
#define MY_SYNC 4096 /* my_copy(): sync dst file */
#define MY_SYNC_DIR 32768 /* my_create/delete/rename: sync directory */
#define MY_SYNC_FILESIZE 65536 /* my_sync(): safe sync when file is extended */
+#define MY_THREAD_SPECIFIC 0x10000 /* my_malloc(): thread specific */
+#define MY_THREAD_MOVE 0x20000 /* realloc(); Memory can move */
+/* Tree that should delete things automatically */
+#define MY_TREE_WITH_DELETE 0x40000
#define MY_CHECK_ERROR 1 /* Params to my_end; Check open-close */
#define MY_GIVE_INFO 2 /* Give time info about process*/
@@ -147,6 +152,18 @@ typedef struct my_aio_result {
/* Extra length needed for filename if one calls my_create_backup_name */
#define MY_BACKUP_NAME_EXTRA_LENGTH 17
+/* If we have our own safemalloc (for debugging) */
+#if defined(SAFEMALLOC)
+void sf_report_leaked_memory(my_thread_id id);
+extern my_thread_id (*sf_malloc_dbug_id)(void);
+#define SAFEMALLOC_REPORT_MEMORY(X) sf_report_leaked_memory(X)
+#else
+#define SAFEMALLOC_REPORT_MEMORY(X) do {} while(0)
+#endif
+
+typedef void (*MALLOC_SIZE_CB) (long long size, my_bool is_thread_specific);
+extern void set_malloc_size_cb(MALLOC_SIZE_CB func);
+
/* defines when allocating data */
extern void *my_malloc(size_t Size,myf MyFlags);
extern void *my_multi_malloc(myf MyFlags, ...);
@@ -182,7 +199,7 @@ extern void my_large_free(uchar *ptr);
#define my_alloca(SZ) alloca((size_t) (SZ))
#define my_afree(PTR) ((void)0)
#else
-#define my_alloca(SZ) my_malloc(SZ,MYF(MY_FAE))
+#define my_alloca(SZ) my_malloc(SZ,MYF(MY_FAE|MY_THREAD_SPECIFIC))
#define my_afree(PTR) my_free(PTR)
#endif /* HAVE_ALLOCA */
@@ -322,6 +339,7 @@ typedef struct st_dynamic_array
uint elements,max_element;
uint alloc_increment;
uint size_of_element;
+ myf malloc_flags;
} DYNAMIC_ARRAY;
typedef struct st_my_tmpdir
@@ -628,6 +646,7 @@ extern FILE *my_fdopen(File Filedes,const char *name, int Flags,myf MyFlags);
extern FILE *my_freopen(const char *path, const char *mode, FILE *stream);
extern int my_fclose(FILE *fd,myf MyFlags);
extern int my_vfprintf(FILE *stream, const char* format, va_list args);
+extern void my_strerror(char *buf, size_t len, int nr);
extern int my_fprintf(FILE *stream, const char* format, ...);
extern File my_fileno(FILE *fd);
extern int my_chsize(File fd,my_off_t newlength, int filler, myf MyFlags);
@@ -677,6 +696,7 @@ extern my_bool has_path(const char *name);
extern char *convert_dirname(char *to, const char *from, const char *from_end);
extern void to_unix_path(char * name);
extern char * fn_ext(const char *name);
+extern char * fn_ext2(const char *name);
extern char * fn_same(char * toname,const char *name,int flag);
extern char * fn_format(char * to,const char *name,const char *dir,
const char *form, uint flag);
@@ -712,6 +732,7 @@ extern int flush_write_cache(RECORD_CACHE *info);
extern void handle_recived_signals(void);
extern sig_handler my_set_alarm_variable(int signo);
+extern my_bool radixsort_is_appliccable(uint n_items, size_t size_of_element);
extern void my_string_ptr_sort(uchar *base,uint items,size_t size);
extern void radixsort_for_str_ptr(uchar* base[], uint number_of_elements,
size_t size_of_element,uchar *buffer[]);
@@ -764,16 +785,10 @@ extern my_bool real_open_cached_file(IO_CACHE *cache);
extern void close_cached_file(IO_CACHE *cache);
File create_temp_file(char *to, const char *dir, const char *pfx,
int mode, myf MyFlags);
-#define my_init_dynamic_array(A,B,C,D) init_dynamic_array2(A,B,NULL,C,D)
-#define my_init_dynamic_array_ci(A,B,C,D) init_dynamic_array2(A,B,NULL,C,D)
-#define my_init_dynamic_array2(A,B,C,D,E) init_dynamic_array2(A,B,C,D,E)
-#define my_init_dynamic_array2_ci(A,B,C,D,E) init_dynamic_array2(A,B,C,D,E)
-extern my_bool init_dynamic_array2(DYNAMIC_ARRAY *array, uint element_size,
+#define my_init_dynamic_array(A,B,C,D,E) my_init_dynamic_array2(A,B,NULL,C,D,E)
+extern my_bool my_init_dynamic_array2(DYNAMIC_ARRAY *array, uint element_size,
void *init_buffer, uint init_alloc,
- uint alloc_increment);
-/* init_dynamic_array() function is deprecated */
-extern my_bool init_dynamic_array(DYNAMIC_ARRAY *array, uint element_size,
- uint init_alloc, uint alloc_increment);
+ uint alloc_increment, myf my_flags);
extern my_bool insert_dynamic(DYNAMIC_ARRAY *array, const uchar * element);
extern uchar *alloc_dynamic(DYNAMIC_ARRAY *array);
extern uchar *pop_dynamic(DYNAMIC_ARRAY*);
@@ -797,12 +812,23 @@ my_bool dynstr_append_mem(DYNAMIC_STRING *str, const char *append,
size_t length);
extern my_bool dynstr_append_os_quoted(DYNAMIC_STRING *str, const char *append,
...);
+extern my_bool dynstr_append_quoted(DYNAMIC_STRING *str,
+ const char *append, size_t len,
+ char quote);
extern my_bool dynstr_set(DYNAMIC_STRING *str, const char *init_str);
extern my_bool dynstr_realloc(DYNAMIC_STRING *str, size_t additional_size);
extern my_bool dynstr_trunc(DYNAMIC_STRING *str, size_t n);
extern void dynstr_free(DYNAMIC_STRING *str);
+extern uint32 copy_and_convert_extended(char *to, uint32 to_length,
+ CHARSET_INFO *to_cs,
+ const char *from, uint32 from_length,
+ CHARSET_INFO *from_cs, uint *errors);
extern void dynstr_reassociate(DYNAMIC_STRING *str, char **res, size_t *length,
size_t *alloc_length);
#ifdef HAVE_MLOCK
extern void *my_malloc_lock(size_t length,myf flags);
extern void my_free_lock(void *ptr);
@@ -814,7 +840,7 @@ extern void my_free_lock(void *ptr);
#define ALLOC_ROOT_MIN_BLOCK_SIZE (MALLOC_OVERHEAD + sizeof(USED_MEM) + 8)
#define clear_alloc_root(A) do { (A)->free= (A)->used= (A)->pre_alloc= 0; (A)->min_malloc=0;} while(0)
extern void init_alloc_root(MEM_ROOT *mem_root, size_t block_size,
- size_t pre_alloc_size);
+ size_t pre_alloc_size, myf my_flags);
extern void *alloc_root(MEM_ROOT *mem_root, size_t Size);
extern void *multi_alloc_root(MEM_ROOT *mem_root, ...);
extern void free_root(MEM_ROOT *root, myf MyFLAGS);
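
Taken together, the myf plumbing above lets callers tag allocations as
thread-specific and lets the server account for them via a callback. A
minimal sketch using only names declared in this header (account_memory and
session_init are illustrative):

  static void account_memory(long long size, my_bool is_thread_specific)
  {
    /* invoked by mysys for every tracked alloc/free; sketch only */
  }

  static int session_init(void)
  {
    DYNAMIC_ARRAY arr;
    MEM_ROOT root;
    void *buf;

    set_malloc_size_cb(account_memory);     /* normally done once at startup */
    if (!(buf= my_malloc(1024, MYF(MY_WME | MY_THREAD_SPECIFIC))))
      return 1;
    if (my_init_dynamic_array(&arr, sizeof(int), 16, 16,
                              MYF(MY_THREAD_SPECIFIC)))
    {
      my_free(buf);
      return 1;
    }
    init_alloc_root(&root, 2048, 0, MYF(MY_THREAD_SPECIFIC));
    my_free(buf);
    return 0;
  }
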
diff --git a/include/my_tree.h b/include/my_tree.h
index 3dd92712af2..f8be55f84b2 100644
--- a/include/my_tree.h
+++ b/include/my_tree.h
@@ -68,13 +68,15 @@ typedef struct st_tree {
MEM_ROOT mem_root;
my_bool with_delete;
tree_element_free free;
+ myf my_flags;
uint flag;
} TREE;
/* Functions on whole tree */
void init_tree(TREE *tree, size_t default_alloc_size, size_t memory_limit,
- int size, qsort_cmp2 compare, my_bool with_delete,
- tree_element_free free_element, void *custom_arg);
+ int size, qsort_cmp2 compare,
+ tree_element_free free_element, void *custom_arg,
+ myf my_flags);
void delete_tree(TREE*);
void reset_tree(TREE*);
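
Callers now select delete-on-free behaviour through my_flags instead of the
removed with_delete argument. A hedged sketch of the updated call (cmp_int
and free_int are hypothetical callbacks; MY_TREE_WITH_DELETE comes from
my_sys.h):

  TREE tree;
  init_tree(&tree, 0, 0, sizeof(int), (qsort_cmp2) cmp_int,
            (tree_element_free) free_int, NULL,
            MYF(MY_TREE_WITH_DELETE | MY_THREAD_SPECIFIC));
  /* ... insert and search ... */
  delete_tree(&tree);
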
diff --git a/include/myisamchk.h b/include/myisamchk.h
index 9478936c748..7e7b685a88a 100644
--- a/include/myisamchk.h
+++ b/include/myisamchk.h
@@ -63,6 +63,8 @@
#define T_ZEROFILL_KEEP_LSN ((ulonglong) 1L << 33)
/** If repair should not bump create_rename_lsn */
#define T_NO_CREATE_RENAME_LSN ((ulonglong) 1L << 33)
+#define T_CREATE_UNIQUE_BY_SORT ((ulonglong) 1L << 34)
+#define T_SUPPRESS_ERR_HANDLING ((ulonglong) 1L << 35)
#define T_REP_ANY (T_REP | T_REP_BY_SORT | T_REP_PARALLEL)
@@ -162,6 +164,7 @@ typedef struct st_handler_check_param
mysql_mutex_t print_msg_mutex;
my_bool need_print_msg_lock;
+ myf malloc_flags;
} HA_CHECK;
diff --git a/include/mysql.h.pp b/include/mysql.h.pp
index 48ce79046ff..331183180c0 100644
--- a/include/mysql.h.pp
+++ b/include/mysql.h.pp
@@ -28,7 +28,7 @@ typedef struct st_net {
unsigned char reading_or_writing;
char save_char;
char net_skip_rest_factor;
- my_bool unused1;
+ my_bool thread_specific_malloc;
my_bool compress;
my_bool unused3;
unsigned char *unused;
@@ -80,7 +80,7 @@ enum enum_mysql_set_option
MYSQL_OPTION_MULTI_STATEMENTS_ON,
MYSQL_OPTION_MULTI_STATEMENTS_OFF
};
-my_bool my_net_init(NET *net, Vio* vio);
+my_bool my_net_init(NET *net, Vio* vio, unsigned int my_flags);
void my_net_local_init(NET *net);
void net_end(NET *net);
void net_clear(NET *net, my_bool clear_buffer);
diff --git a/include/mysql/plugin.h b/include/mysql/plugin.h
index d28b762ce37..38573180232 100644
--- a/include/mysql/plugin.h
+++ b/include/mysql/plugin.h
@@ -167,7 +167,7 @@ enum enum_mysql_show_type
SHOW_UNDEF, SHOW_BOOL, SHOW_UINT, SHOW_ULONG,
SHOW_ULONGLONG, SHOW_CHAR, SHOW_CHAR_PTR,
SHOW_ARRAY, SHOW_FUNC, SHOW_DOUBLE,
- SHOW_SINT, SHOW_SLONG, SHOW_SLONGLONG,
+ SHOW_SINT, SHOW_SLONG, SHOW_SLONGLONG, SHOW_SIMPLE_FUNC,
SHOW_always_last
};
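
SHOW_SIMPLE_FUNC is used like SHOW_FUNC for status variables backed by a
function; the "simple" variant is assumed to mark functions cheap enough to
be called without extra locking. A hedged plugin-side sketch (show_example
and Example_counter are illustrative):

  static int show_example(MYSQL_THD thd, struct st_mysql_show_var *var,
                          char *buff)
  {
    var->type= SHOW_LONG;
    var->value= buff;
    *(long*) buff= 42;                      /* hypothetical counter */
    return 0;
  }

  static struct st_mysql_show_var example_status[]=
  {
    {"Example_counter", (char*) &show_example, SHOW_SIMPLE_FUNC},
    {0, 0, SHOW_UNDEF}
  };
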
diff --git a/include/mysql/plugin_audit.h.pp b/include/mysql/plugin_audit.h.pp
index f19d5fe797c..fbd4ec2dd3f 100644
--- a/include/mysql/plugin_audit.h.pp
+++ b/include/mysql/plugin_audit.h.pp
@@ -104,7 +104,7 @@ enum enum_mysql_show_type
SHOW_UNDEF, SHOW_BOOL, SHOW_UINT, SHOW_ULONG,
SHOW_ULONGLONG, SHOW_CHAR, SHOW_CHAR_PTR,
SHOW_ARRAY, SHOW_FUNC, SHOW_DOUBLE,
- SHOW_SINT, SHOW_SLONG, SHOW_SLONGLONG,
+ SHOW_SINT, SHOW_SLONG, SHOW_SLONGLONG, SHOW_SIMPLE_FUNC,
SHOW_always_last
};
struct st_mysql_show_var {
diff --git a/include/mysql/plugin_auth.h.pp b/include/mysql/plugin_auth.h.pp
index 23153198a7d..46811825142 100644
--- a/include/mysql/plugin_auth.h.pp
+++ b/include/mysql/plugin_auth.h.pp
@@ -104,7 +104,7 @@ enum enum_mysql_show_type
SHOW_UNDEF, SHOW_BOOL, SHOW_UINT, SHOW_ULONG,
SHOW_ULONGLONG, SHOW_CHAR, SHOW_CHAR_PTR,
SHOW_ARRAY, SHOW_FUNC, SHOW_DOUBLE,
- SHOW_SINT, SHOW_SLONG, SHOW_SLONGLONG,
+ SHOW_SINT, SHOW_SLONG, SHOW_SLONGLONG, SHOW_SIMPLE_FUNC,
SHOW_always_last
};
struct st_mysql_show_var {
diff --git a/include/mysql/plugin_ftparser.h.pp b/include/mysql/plugin_ftparser.h.pp
index fb09a97618b..49cf7e5b931 100644
--- a/include/mysql/plugin_ftparser.h.pp
+++ b/include/mysql/plugin_ftparser.h.pp
@@ -104,7 +104,7 @@ enum enum_mysql_show_type
SHOW_UNDEF, SHOW_BOOL, SHOW_UINT, SHOW_ULONG,
SHOW_ULONGLONG, SHOW_CHAR, SHOW_CHAR_PTR,
SHOW_ARRAY, SHOW_FUNC, SHOW_DOUBLE,
- SHOW_SINT, SHOW_SLONG, SHOW_SLONGLONG,
+ SHOW_SINT, SHOW_SLONG, SHOW_SLONGLONG, SHOW_SIMPLE_FUNC,
SHOW_always_last
};
struct st_mysql_show_var {
diff --git a/include/mysql_com.h b/include/mysql_com.h
index c0f350d2bcf..5697fc952ce 100644
--- a/include/mysql_com.h
+++ b/include/mysql_com.h
@@ -125,6 +125,8 @@ enum enum_server_command
reserved by MySQL Cluster */
#define FIELD_FLAGS_COLUMN_FORMAT 24 /* Field column format, bit 24-25,
reserved by MySQL Cluster */
+#define HAS_EXPLICIT_VALUE (1 << 26) /* An INSERT/UPDATE operation supplied
+ an explicit default value */
#define REFRESH_GRANT (1UL << 0) /* Refresh grant tables */
#define REFRESH_LOG (1UL << 1) /* Start on new log file */
@@ -328,7 +330,7 @@ typedef struct st_net {
unsigned char reading_or_writing;
char save_char;
char net_skip_rest_factor;
- my_bool unused1; /* Please remove with the next incompatible ABI change */
+ my_bool thread_specific_malloc;
my_bool compress;
my_bool unused3; /* Please remove with the next incompatible ABI change. */
/*
@@ -463,10 +465,10 @@ enum enum_mysql_set_option
extern "C" {
#endif
-my_bool my_net_init(NET *net, Vio* vio);
+my_bool my_net_init(NET *net, Vio* vio, unsigned int my_flags);
void my_net_local_init(NET *net);
void net_end(NET *net);
- void net_clear(NET *net, my_bool clear_buffer);
+void net_clear(NET *net, my_bool clear_buffer);
my_bool net_realloc(NET *net, size_t length);
my_bool net_flush(NET *net);
my_bool my_net_write(NET *net,const unsigned char *packet, size_t len);
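
my_net_init() now forwards my_flags to the NET buffer allocations
(presumably setting the thread_specific_malloc member above). A one-line
sketch of the updated call; pass MYF(0) to keep the old behaviour:

  if (my_net_init(&net, vio, MYF(MY_THREAD_SPECIFIC)))
    return 1;                               /* allocation failed */
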
diff --git a/libmysql/CMakeLists.txt b/libmysql/CMakeLists.txt
index 5ad6b197379..29c8298a825 100644
--- a/libmysql/CMakeLists.txt
+++ b/libmysql/CMakeLists.txt
@@ -218,7 +218,6 @@ dynamic_column_create
dynamic_column_create_many
dynamic_column_update
dynamic_column_update_many
-dynamic_column_delete
dynamic_column_exists
dynamic_column_list
dynamic_column_get
diff --git a/libmysql/errmsg.c b/libmysql/errmsg.c
index 498ba6e9829..4c4485f7ec4 100644
--- a/libmysql/errmsg.c
+++ b/libmysql/errmsg.c
@@ -27,9 +27,9 @@ const char *client_errors[]=
{
"Unknown MySQL error",
"Can't create UNIX socket (%d)",
- "Can't connect to local MySQL server through socket '%-.100s' (%d)",
- "Can't connect to MySQL server on '%-.100s' (%d)",
- "Can't create TCP/IP socket (%d)",
+ "Can't connect to local MySQL server through socket '%-.100s' (%M)",
+ "Can't connect to MySQL server on '%-.100s' (%M)",
+ "Can't create TCP/IP socket (%M)",
"Unknown MySQL server host '%-.100s' (%d)",
"MySQL server has gone away",
"Protocol mismatch; server version = %d, client version = %d",
@@ -80,7 +80,7 @@ const char *client_errors[]=
"Prepared statement contains no metadata",
"Attempt to read a row while there is no result set associated with the statement",
"This feature is not implemented yet",
- "Lost connection to MySQL server at '%s', system error: %d",
+ "Lost connection to MySQL server at '%s', system error: %M",
"Statement closed indirectly because of a preceeding %s() call",
"The number of columns in the result set differs from the number of bound buffers. You must reset the statement, rebind the result set columns, and execute the statement again",
"This handle is already connected. Use a separate handle for each connection.",
diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c
index f90cc96a90f..69fce429ab9 100644
--- a/libmysql/libmysql.c
+++ b/libmysql/libmysql.c
@@ -1520,8 +1520,8 @@ mysql_stmt_init(MYSQL *mysql)
DBUG_RETURN(NULL);
}
- init_alloc_root(&stmt->mem_root, 2048, 2048);
- init_alloc_root(&stmt->result.alloc, 4096, 4096);
+ init_alloc_root(&stmt->mem_root, 2048, 2048, MYF(MY_THREAD_SPECIFIC));
+ init_alloc_root(&stmt->result.alloc, 4096, 4096, MYF(MY_THREAD_SPECIFIC));
stmt->result.alloc.min_malloc= sizeof(MYSQL_ROWS);
mysql->stmts= list_add(mysql->stmts, &stmt->list);
stmt->list.data= stmt;
@@ -1532,7 +1532,8 @@ mysql_stmt_init(MYSQL *mysql)
strmov(stmt->sqlstate, not_error_sqlstate);
/* The rest of statement members was bzeroed inside malloc */
- init_alloc_root(&stmt->extension->fields_mem_root, 2048, 0);
+ init_alloc_root(&stmt->extension->fields_mem_root, 2048, 0,
+ MYF(MY_THREAD_SPECIFIC));
DBUG_RETURN(stmt);
}
diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt
index 5af5f5e1807..0f08ecd76bb 100644
--- a/libmysqld/CMakeLists.txt
+++ b/libmysqld/CMakeLists.txt
@@ -44,6 +44,7 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc
../sql-common/client_plugin.c ../sql-common/mysql_async.c
../sql/password.c ../sql/discover.cc ../sql/derror.cc
../sql/field.cc ../sql/field_conv.cc
+ ../sql/filesort_utils.cc
../sql/filesort.cc ../sql/gstream.cc ../sql/slave.cc
../sql/signal_handler.cc
../sql/handler.cc ../sql/hash_filo.cc ../sql/hostname.cc
@@ -73,7 +74,8 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc
../sql/debug_sync.cc ../sql/opt_table_elimination.cc
../sql/sql_prepare.cc ../sql/sql_rename.cc ../sql/sql_repl.cc
../sql/sql_select.cc ../sql/sql_servers.cc
- ../sql/sql_show.cc ../sql/sql_state.c ../sql/sql_string.cc
+ ../sql/sql_show.cc ../sql/sql_state.c
+ ../sql/sql_statistics.cc ../sql/sql_string.cc
../sql/sql_tablespace.cc ../sql/sql_table.cc ../sql/sql_test.cc
../sql/sql_trigger.cc ../sql/sql_udf.cc ../sql/sql_union.cc
../sql/sql_update.cc ../sql/sql_view.cc ../sql/sql_profile.cc
@@ -95,6 +97,7 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc
../sql/create_options.cc ../sql/rpl_utility.cc
../sql/rpl_reporting.cc
../sql/sql_expression_cache.cc
+ ../sql/my_apc.cc ../sql/my_apc.h
${GEN_SOURCES}
${MYSYS_LIBWRAP_SOURCE}
)
diff --git a/libmysqld/emb_qcache.cc b/libmysqld/emb_qcache.cc
index d263e5d5fe8..abb0631ebfb 100644
--- a/libmysqld/emb_qcache.cc
+++ b/libmysqld/emb_qcache.cc
@@ -415,7 +415,7 @@ int emb_load_querycache_result(THD *thd, Querycache_stream *src)
if (!data)
goto err;
- init_alloc_root(&data->alloc, 8192,0);
+ init_alloc_root(&data->alloc, 8192,0,MYF(0));
f_alloc= &data->alloc;
data->fields= src->load_int();
diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc
index b454fc0d2e0..5332003885d 100644
--- a/libmysqld/lib_sql.cc
+++ b/libmysqld/lib_sql.cc
@@ -520,7 +520,8 @@ int init_embedded_server(int argc, char **argv, char **groups)
if (!groups)
groups= (char**) fake_groups;
- my_progname= (char *)"mysql_embedded";
+ if (!my_progname)
+ my_progname= (char *)"mysql_embedded";
/*
Perform basic logger initialization logger. Should be called after
@@ -634,7 +635,7 @@ void init_embedded_mysql(MYSQL *mysql, int client_flag)
mysql->server_version= server_version;
mysql->client_flag= client_flag;
//mysql->server_capabilities= client_flag;
- init_alloc_root(&mysql->field_alloc, 8192, 0);
+ init_alloc_root(&mysql->field_alloc, 8192, 0, MYF(0));
}
/**
@@ -906,8 +907,9 @@ int Protocol::begin_dataset()
if (!data)
return 1;
alloc= &data->alloc;
- init_alloc_root(alloc,8192,0); /* Assume rowlength < 8192 */
- alloc->min_malloc=sizeof(MYSQL_ROWS);
+ /* Assume rowlength < 8192 */
+ init_alloc_root(alloc, 8192, 0, MYF(0));
+ alloc->min_malloc= sizeof(MYSQL_ROWS);
return 0;
}
@@ -1004,7 +1006,7 @@ bool Protocol::send_result_set_metadata(List<Item> *list, uint flags)
thd_cs->mbmaxlen);
}
client_field->type= server_field.type;
- client_field->flags= server_field.flags;
+ client_field->flags= (uint16) server_field.flags;
client_field->decimals= server_field.decimals;
client_field->db_length= strlen(client_field->db);
client_field->table_length= strlen(client_field->table);
diff --git a/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test b/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test
index b41bfeaba74..4ce001cfdd7 100644
--- a/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test
+++ b/mysql-test/extra/binlog_tests/mix_innodb_myisam_binlog.test
@@ -318,8 +318,8 @@ connection con4;
select get_lock("a",10); # wait for rollback to finish
if (`select @@binlog_format = 'STATEMENT' || @@binlog_format = 'MIXED'`)
{
- --let $binlog_rollback= query_get_value(SHOW BINLOG EVENTS, Pos, 7)
- --let $binlog_query= query_get_value(SHOW BINLOG EVENTS, Info, 7)
+ --let $binlog_rollback= query_get_value(SHOW BINLOG EVENTS, Pos, 8)
+ --let $binlog_query= query_get_value(SHOW BINLOG EVENTS, Info, 8)
if ($binlog_query != ROLLBACK) {
--echo Wrong query from SHOW BINLOG EVENTS. Expected ROLLBACK, got '$binlog_query'
--source include/show_rpl_debug_info.inc
diff --git a/mysql-test/extra/rpl_tests/create_recursive_construct.inc b/mysql-test/extra/rpl_tests/create_recursive_construct.inc
index 6e130a8154f..e790a1db41f 100644
--- a/mysql-test/extra/rpl_tests/create_recursive_construct.inc
+++ b/mysql-test/extra/rpl_tests/create_recursive_construct.inc
@@ -325,7 +325,8 @@ if ($CRC_RET_stmt_sidef) {
SHOW BINLOG EVENTS;
--die Wrong number of warnings.
}
- --let $binlog_event= query_get_value(SHOW BINLOG EVENTS, Event_type, 2)
+ # There should be no events after format description and binlog checkpoint.
+ --let $binlog_event= query_get_value(SHOW BINLOG EVENTS, Event_type, 3)
if ($binlog_event != No such row) {
--enable_query_log
--echo ******** Failure! Something was written to the binlog despite SQL_LOG_BIN=0 ********
@@ -345,23 +346,23 @@ if ($CRC_RET_stmt_sidef) {
SHOW BINLOG EVENTS;
--die Warnings printed
}
- --let $event_type= query_get_value(SHOW BINLOG EVENTS, Event_type, 3)
- # The first event is format_description, the second is
- # Query_event('BEGIN'), and the third should be our Query
+ --let $event_type= query_get_value(SHOW BINLOG EVENTS, Event_type, 4)
+ # The first event is format_description, the second is Binlog_checkpoint,
+ # the third Query_event('BEGIN'), and the fourth should be our Query
# for 'INSERT DELAYED' unsafe_type 3, which is safe after
# the fix of bug#54579.
if (`SELECT $unsafe_type = 3 AND '$event_type' != 'Query'`) {
--enable_query_log
- --echo ******** Failure! Event number 3 was a '$event_type', not a 'Query'. ********
+ --echo ******** Failure! Event number 4 was a '$event_type', not a 'Query'. ********
SHOW BINLOG EVENTS;
--die Wrong events in binlog.
}
- # The first event is format_description, the second is
- # Query_event('BEGIN'), and the third should be our Table_map
+ # The first event is format_description, the second is Binlog_checkpoint,
+ # the third is Query_event('BEGIN'), and the fourth should be our Table_map
# for unsafe statement.
if (`SELECT $unsafe_type != 3 AND '$event_type' != 'Table_map'`) {
--enable_query_log
- --echo ******** Failure! Event number 3 was a '$event_type', not a 'Table_map'. ********
+ --echo ******** Failure! Event number 4 was a '$event_type', not a 'Table_map'. ********
SHOW BINLOG EVENTS;
--die Wrong events in binlog.
}
diff --git a/mysql-test/extra/rpl_tests/rpl_deadlock.test b/mysql-test/extra/rpl_tests/rpl_deadlock.test
index 0e862e041e6..56ff4683df8 100644
--- a/mysql-test/extra/rpl_tests/rpl_deadlock.test
+++ b/mysql-test/extra/rpl_tests/rpl_deadlock.test
@@ -34,7 +34,7 @@ INSERT INTO t3 VALUES (3);
COMMIT;
save_master_pos;
# Save BEGIN event into variable
-let $master_pos_begin= query_get_value(SHOW BINLOG EVENTS, Pos, 5);
+let $master_pos_begin= query_get_value(SHOW BINLOG EVENTS, Pos, 6);
--echo
# 1) Test deadlock
diff --git a/mysql-test/extra/rpl_tests/rpl_insert_delayed.test b/mysql-test/extra/rpl_tests/rpl_insert_delayed.test
index 62e90b629ba..3d7d3600199 100644
--- a/mysql-test/extra/rpl_tests/rpl_insert_delayed.test
+++ b/mysql-test/extra/rpl_tests/rpl_insert_delayed.test
@@ -94,8 +94,10 @@ if (`SELECT @@global.binlog_format = 'STATEMENT'`)
#flush the logs before the test
connection slave;
FLUSH LOGS;
+ source include/wait_for_binlog_checkpoint.inc;
connection master;
FLUSH LOGS;
+ source include/wait_for_binlog_checkpoint.inc;
}
CREATE TABLE t1(a int, UNIQUE(a));
@@ -135,7 +137,7 @@ if (`SELECT @@global.binlog_format = 'STATEMENT'`)
{
#must show two INSERT DELAYED
--let $binlog_file= query_get_value(SHOW MASTER STATUS, File, 1)
- --let $binlog_limit= 1,6
+ --let $binlog_limit= 2,6
--source include/show_binlog_events.inc
}
select * from t1;
diff --git a/mysql-test/extra/rpl_tests/rpl_log.test b/mysql-test/extra/rpl_tests/rpl_log.test
index 892d926a156..961996bb265 100644
--- a/mysql-test/extra/rpl_tests/rpl_log.test
+++ b/mysql-test/extra/rpl_tests/rpl_log.test
@@ -43,6 +43,7 @@ let $binlog_limit= 1,4;
source include/show_binlog_events.inc;
let $binlog_limit=;
flush logs;
+--source include/wait_for_binlog_checkpoint.inc
# We need an extra update before doing save_master_pos.
# Otherwise, an unlikely scenario may occur:
diff --git a/mysql-test/extra/rpl_tests/rpl_max_relay_size.test b/mysql-test/extra/rpl_tests/rpl_max_relay_size.test
index d8cd4f2d284..0fcff5ebe6c 100644
--- a/mysql-test/extra/rpl_tests/rpl_max_relay_size.test
+++ b/mysql-test/extra/rpl_tests/rpl_max_relay_size.test
@@ -34,7 +34,7 @@ reset slave;
--echo # Test 1
--echo #
-set @my_max_binlog_size= @@global.max_binlog_size;
+set @my_max_binlog_size= @@global.max_binlog_size, @my_max_relay_log_size=@@global.max_relay_log_size;
set global max_binlog_size=8192;
set global max_relay_log_size=8192-1; # mapped to 4096
select @@global.max_relay_log_size;
@@ -110,6 +110,7 @@ source include/show_master_status.inc;
# Restore max_binlog_size
connection slave;
set global max_binlog_size= @my_max_binlog_size;
+set global max_relay_log_size= @my_max_relay_log_size;
--echo #
--echo # End of 4.1 tests
diff --git a/mysql-test/extra/rpl_tests/rpl_show_relaylog_events.inc b/mysql-test/extra/rpl_tests/rpl_show_relaylog_events.inc
index a56e08ece42..d0a905d3b7d 100644
--- a/mysql-test/extra/rpl_tests/rpl_show_relaylog_events.inc
+++ b/mysql-test/extra/rpl_tests/rpl_show_relaylog_events.inc
@@ -41,8 +41,10 @@ INSERT INTO t1 VALUES (3);
#
FLUSH LOGS;
+--source include/wait_for_binlog_checkpoint.inc
-- connection master
FLUSH LOGS;
+--source include/wait_for_binlog_checkpoint.inc
DROP TABLE t1;
--let $is_relay_log= 0
diff --git a/mysql-test/include/alter_table_mdev539.inc b/mysql-test/include/alter_table_mdev539.inc
new file mode 100644
index 00000000000..a246434b779
--- /dev/null
+++ b/mysql-test/include/alter_table_mdev539.inc
@@ -0,0 +1,65 @@
+--echo #
+--echo # mdev-539: fast build of unique/primary indexes for MyISAM/Aria
+--echo #
+
+--disable_warnings
+DROP DATABASE IF EXISTS dbt3_s001;
+--enable_warnings
+CREATE DATABASE dbt3_s001;
+
+use dbt3_s001;
+
+--disable_query_log
+--disable_result_log
+--disable_warnings
+--source include/dbt3_s001.inc
+--enable_warnings
+--enable_result_log
+--enable_query_log
+
+
+drop index `primary` on lineitem;
+show create table lineitem;
+alter table lineitem add primary key (l_orderkey, l_linenumber);
+show create table lineitem;
+drop index `primary` on lineitem;
+select * from lineitem where l_orderkey=1 and l_linenumber=2;
+insert into lineitem values
+(1,68,9,2,36,34850.16,0.07,0.06,'N','O','1996-04-12','1996-02-28','1996-04-20','TAKE BACK RETURN','MAIL','slyly bold pinto beans detect s');
+select * from lineitem where l_orderkey=1 and l_linenumber=2;
+--error ER_DUP_ENTRY
+alter table lineitem add primary key (l_orderkey, l_linenumber);
+show create table lineitem;
+select * from lineitem where l_orderkey=1 and l_linenumber=2;
+delete from lineitem where l_orderkey=1 and l_linenumber=2 and l_discount=0.07;
+alter table lineitem add primary key (l_orderkey, l_linenumber);
+show create table lineitem;
+select * from lineitem where l_orderkey=1 and l_linenumber=2;
+
+create unique index i_c_name on customer(c_name);
+show create table customer;
+select * from customer where c_name='Customer#000000003';
+drop index i_c_name on customer;
+insert into customer values
+(303,'Customer#000000003','MG9kdTD2WBHm',1,'11-719-748-3364',7498.12,'AUTOMOBILE','special packages wake. slyly reg');
+select * from customer where c_name='Customer#000000003';
+--error ER_DUP_ENTRY
+alter table customer add unique index i_c_name(c_name);
+show create table customer;
+select * from customer where c_name='Customer#000000003';
+delete from customer where c_custkey=303;
+select * from customer where c_name='Customer#000000003';
+alter table customer add unique index i_c_name(c_name);
+show create table customer;
+select * from customer where c_name='Customer#000000003';
+
+drop index `primary` on customer;
+show create table customer;
+insert into customer values
+(3,'Customer#000000303','MG9kdTD2WBHm',1,'11-719-748-3364',7498.12,'AUTOMOBILE','special packages wake. slyly reg');
+alter ignore table customer add primary key (c_custkey);
+show create table customer;
+select * from customer where c_custkey=3;
+
+DROP DATABASE dbt3_s001;
+
diff --git a/mysql-test/include/binlog_start_pos.inc b/mysql-test/include/binlog_start_pos.inc
index add5a42a426..5237e357b28 100644
--- a/mysql-test/include/binlog_start_pos.inc
+++ b/mysql-test/include/binlog_start_pos.inc
@@ -15,14 +15,14 @@
# 1 /* Checksum algorithm */ +
# 4 /* CRC32 length */
#
-# With current number of events = 160,
+# With current number of events = 161,
#
-# binlog_start_pos = 4 + 19 + 57 + 160 + 1 + 4 = 245.
+# binlog_start_pos = 4 + 19 + 57 + 161 + 1 + 4 = 246.
#
##############################################################################
-let $binlog_start_pos=245;
+let $binlog_start_pos=246;
--disable_query_log
-SET @binlog_start_pos=245;
+SET @binlog_start_pos=246;
--enable_query_log
diff --git a/mysql-test/include/function_defaults.inc b/mysql-test/include/function_defaults.inc
new file mode 100644
index 00000000000..e588c82df1b
--- /dev/null
+++ b/mysql-test/include/function_defaults.inc
@@ -0,0 +1,1166 @@
+SET TIME_ZONE = "+00:00";
+
+--echo #
+--echo # Test of errors for column data types that don't support function
+--echo # defaults.
+--echo #
+--error ER_INVALID_DEFAULT
+eval CREATE TABLE t1( a BIT DEFAULT $current_timestamp );
+--error ER_INVALID_DEFAULT
+eval CREATE TABLE t1( a TINYINT DEFAULT $current_timestamp );
+--error ER_INVALID_DEFAULT
+eval CREATE TABLE t1( a SMALLINT DEFAULT $current_timestamp );
+--error ER_INVALID_DEFAULT
+eval CREATE TABLE t1( a MEDIUMINT DEFAULT $current_timestamp );
+--error ER_INVALID_DEFAULT
+eval CREATE TABLE t1( a INT DEFAULT $current_timestamp );
+--error ER_INVALID_DEFAULT
+eval CREATE TABLE t1( a BIGINT DEFAULT $current_timestamp );
+--error ER_INVALID_DEFAULT
+eval CREATE TABLE t1( a FLOAT DEFAULT $current_timestamp );
+--error ER_INVALID_DEFAULT
+eval CREATE TABLE t1( a DECIMAL DEFAULT $current_timestamp );
+--error ER_INVALID_DEFAULT
+eval CREATE TABLE t1( a DATE DEFAULT $current_timestamp );
+--error ER_INVALID_DEFAULT
+eval CREATE TABLE t1( a TIME DEFAULT $current_timestamp );
+--error ER_INVALID_DEFAULT
+eval CREATE TABLE t1( a YEAR DEFAULT $current_timestamp );
+
+--error ER_INVALID_ON_UPDATE
+eval CREATE TABLE t1( a BIT ON UPDATE $current_timestamp );
+--error ER_INVALID_ON_UPDATE
+eval CREATE TABLE t1( a TINYINT ON UPDATE $current_timestamp );
+--error ER_INVALID_ON_UPDATE
+eval CREATE TABLE t1( a SMALLINT ON UPDATE $current_timestamp );
+--error ER_INVALID_ON_UPDATE
+eval CREATE TABLE t1( a MEDIUMINT ON UPDATE $current_timestamp );
+--error ER_INVALID_ON_UPDATE
+eval CREATE TABLE t1( a INT ON UPDATE $current_timestamp );
+--error ER_INVALID_ON_UPDATE
+eval CREATE TABLE t1( a BIGINT ON UPDATE $current_timestamp );
+--error ER_INVALID_ON_UPDATE
+eval CREATE TABLE t1( a FLOAT ON UPDATE $current_timestamp );
+--error ER_INVALID_ON_UPDATE
+eval CREATE TABLE t1( a DECIMAL ON UPDATE $current_timestamp );
+--error ER_INVALID_ON_UPDATE
+eval CREATE TABLE t1( a DATE ON UPDATE $current_timestamp );
+--error ER_INVALID_ON_UPDATE
+eval CREATE TABLE t1( a TIME ON UPDATE $current_timestamp );
+--error ER_INVALID_ON_UPDATE
+eval CREATE TABLE t1( a YEAR ON UPDATE $current_timestamp );
+
+--echo #
+--echo # Test that the default clause behaves like NOW() regarding time zones.
+--echo #
+eval CREATE TABLE t1 (
+ a $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ b $timestamp NOT NULL DEFAULT $current_timestamp,
+ c $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+ d $timestamp NULL,
+ e $datetime DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ f $datetime DEFAULT $current_timestamp,
+ g $datetime ON UPDATE $current_timestamp,
+ h $datetime
+);
+
+--echo # 2011-09-27 14:11:08 UTC
+SET TIMESTAMP = 1317132668.654321;
+SET @old_time_zone = @@TIME_ZONE;
+SET TIME_ZONE = "+05:00";
+
+eval INSERT INTO t1( d, h ) VALUES ( $now, $now );
+SELECT * FROM t1;
+
+--echo # 1989-05-13 01:02:03
+SET TIMESTAMP = 611017323.543212;
+eval UPDATE t1 SET d = $now, h = $now;
+SELECT * FROM t1;
+
+SET TIME_ZONE = @old_time_zone;
+DROP TABLE t1;
+
+--echo #
+--echo # Test of several TIMESTAMP columns with different function defaults.
+--echo #
+eval CREATE TABLE t1 (
+ a $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ b $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ c $timestamp NOT NULL DEFAULT $current_timestamp,
+ d $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+ e $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+ f INT
+);
+
+--echo # 2011-04-19 07:22:02 UTC
+SET TIMESTAMP = 1303197722.534231;
+
+INSERT INTO t1 ( f ) VALUES (1);
+SELECT * FROM t1;
+
+--echo # 2011-04-19 07:23:18 UTC
+SET TIMESTAMP = 1303197798.132435;
+
+UPDATE t1 SET f = 2;
+SELECT * FROM t1;
+
+DROP TABLE t1;
+
+--echo #
+--echo # Test of inserted values out of order.
+--echo #
+eval CREATE TABLE t1 (
+ a INT,
+ b $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ c $timestamp NOT NULL DEFAULT $current_timestamp,
+ d $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+ e $timestamp NULL,
+ f $datetime,
+ g $datetime DEFAULT $current_timestamp,
+ h $datetime ON UPDATE $current_timestamp,
+ i $datetime DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ j INT
+);
+
+--echo # 2011-04-19 07:22:02 UTC
+SET TIMESTAMP = 1303197722.534231;
+
+INSERT INTO t1 ( j, a ) VALUES ( 1, 1 );
+SELECT * FROM t1;
+
+DROP TABLE t1;
+
+--echo #
+--echo # Test of ON DUPLICATE KEY UPDATE
+--echo #
+eval CREATE TABLE t1 (
+ a INT PRIMARY KEY,
+ b INT,
+ c $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ d $timestamp NOT NULL DEFAULT $current_timestamp,
+ e $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+ f $timestamp NOT NULL DEFAULT '1986-09-27 03:00:00.098765',
+ g $timestamp NULL,
+ h $datetime DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ i $datetime DEFAULT $current_timestamp,
+ j $datetime ON UPDATE $current_timestamp,
+ k $datetime NULL,
+ l $datetime DEFAULT '1986-09-27 03:00:00.098765'
+);
+
+--echo # 1977-12-21 23:00:00 UTC
+SET TIMESTAMP = 251593200.192837;
+INSERT INTO t1(a) VALUES (1) ON DUPLICATE KEY UPDATE b = 2;
+SELECT * FROM t1;
+
+--echo # 1975-05-21 23:00:00 UTC
+SET TIMESTAMP = 169945200.918273;
+INSERT INTO t1(a) VALUES (1) ON DUPLICATE KEY UPDATE b = 2;
+SELECT * FROM t1;
+
+--echo # 1973-08-14 09:11:22 UTC
+SET TIMESTAMP = 114167482.534231;
+INSERT INTO t1(a) VALUES (2) ON DUPLICATE KEY UPDATE b = 2;
+SELECT * FROM t1;
+
+DROP TABLE t1;
+
+eval CREATE TABLE t1 ( a INT PRIMARY KEY, b INT, c $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp );
+
+--echo # 2011-04-19 07:23:18 UTC
+SET TIMESTAMP = 1303197798.945156;
+
+INSERT INTO t1 VALUES
+ (1, 0, '2001-01-01 01:01:01.111111'),
+ (2, 0, '2002-02-02 02:02:02.222222'),
+ (3, 0, '2003-03-03 03:03:03.333333');
+SELECT * FROM t1;
+
+UPDATE t1 SET b = 2, c = c WHERE a = 2;
+SELECT * FROM t1;
+
+INSERT INTO t1 (a) VALUES (4);
+SELECT * FROM t1;
+
+UPDATE t1 SET c = '2004-04-04 04:04:04.444444' WHERE a = 4;
+SELECT * FROM t1;
+
+INSERT INTO t1 ( a ) VALUES ( 3 ), ( 5 ) ON DUPLICATE KEY UPDATE b = 3, c = c;
+SELECT * FROM t1;
+
+INSERT INTO t1 (a, c) VALUES
+ (4, '2004-04-04 00:00:00.444444'),
+ (6, '2006-06-06 06:06:06.666666')
+ON DUPLICATE KEY UPDATE b = 4;
+
+SELECT * FROM t1;
+
+DROP TABLE t1;
+
+
+--echo #
+--echo # Test of REPLACE INTO executed as UPDATE.
+--echo #
+eval CREATE TABLE t1 (
+ a INT PRIMARY KEY,
+ b $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ c $datetime DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ d $timestamp NOT NULL DEFAULT $current_timestamp,
+ e $datetime DEFAULT $current_timestamp,
+ f $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+ g $datetime ON UPDATE $current_timestamp,
+ h $timestamp NULL,
+ i $datetime
+);
+
+--echo # 1970-09-21 09:11:12 UTC
+SET TIMESTAMP = 22756272.163584;
+
+REPLACE INTO t1 ( a ) VALUES ( 1 );
+SELECT * FROM t1;
+
+--echo # 1970-11-10 14:16:17 UTC
+SET TIMESTAMP = 27094577.852954;
+
+
+REPLACE INTO t1 ( a ) VALUES ( 1 );
+SELECT * FROM t1;
+
+DROP TABLE t1;
+
+
+--echo #
+--echo # Test of insertion of NULL, DEFAULT and an empty row for DEFAULT
+--echo # CURRENT_TIMESTAMP.
+--echo #
+eval CREATE TABLE t1 (
+ a $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ b $datetime DEFAULT $current_timestamp,
+ c INT
+);
+
+--echo # 2011-04-20 09:53:41 UTC
+SET TIMESTAMP = 1303293221.163578;
+
+INSERT INTO t1 VALUES (NULL, NULL, 1), (DEFAULT, DEFAULT, 2);
+INSERT INTO t1 ( a, b, c ) VALUES (NULL, NULL, 3), (DEFAULT, DEFAULT, 4);
+SELECT * FROM t1;
+
+SET TIME_ZONE = "+03:00";
+SELECT * FROM t1;
+SET TIME_ZONE = "+00:00";
+
+DROP TABLE t1;
+
+--echo # 2011-04-20 07:05:39 UTC
+SET TIMESTAMP = 1303283139.195624;
+eval CREATE TABLE t1 (
+ a $timestamp NOT NULL DEFAULT '2010-10-11 12:34:56' ON UPDATE $current_timestamp,
+ b $datetime DEFAULT '2010-10-11 12:34:56'
+);
+
+INSERT INTO t1 VALUES (NULL, NULL), (DEFAULT, DEFAULT);
+INSERT INTO t1 ( a, b ) VALUES (NULL, NULL), (DEFAULT, DEFAULT);
+SELECT * FROM t1;
+
+DROP TABLE t1;
+
+--echo # 2011-04-20 09:53:41 UTC
+SET TIMESTAMP = 1303293221.136952;
+
+eval CREATE TABLE t1 (
+a $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+b $timestamp NOT NULL DEFAULT $current_timestamp,
+c $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+d $timestamp NOT NULL DEFAULT '1986-09-27 03:00:00.098765',
+e $timestamp NULL,
+f $datetime DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+g $datetime DEFAULT $current_timestamp,
+h $datetime ON UPDATE $current_timestamp,
+i $datetime NULL,
+j $datetime DEFAULT '1986-09-27 03:00:00.098765'
+);
+
+INSERT INTO t1 VALUES ();
+
+INSERT INTO t1 SELECT NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL;
+
+SELECT * FROM t1;
+
+DROP TABLE t1;
+
+--echo #
+--echo # Test of multiple-table UPDATE for DEFAULT CURRENT_TIMESTAMP
+--echo #
+eval CREATE TABLE t1 (
+ a $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ b $datetime DEFAULT $current_timestamp,
+ c INT
+);
+
+INSERT INTO t1 ( c ) VALUES (1);
+SELECT * FROM t1;
+
+--echo # 2011-04-20 15:06:13 UTC
+SET TIMESTAMP = 1303311973.163587;
+
+UPDATE t1 t11, t1 t12 SET t11.c = 1;
+SELECT * FROM t1;
+
+UPDATE t1 t11, t1 t12 SET t11.c = 2;
+
+SELECT * FROM t1;
+
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (
+ a $timestamp NOT NULL DEFAULT $current_timestamp,
+ b $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+ c $datetime DEFAULT $current_timestamp,
+ d $datetime ON UPDATE $current_timestamp,
+ e INT
+);
+
+eval CREATE TABLE t2 (
+ f INT,
+ g $datetime ON UPDATE $current_timestamp,
+ h $datetime DEFAULT $current_timestamp,
+ i $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+ j $timestamp NOT NULL DEFAULT $current_timestamp
+);
+
+--echo # 1995-03-11 00:02:03 UTC
+SET TIMESTAMP = 794880123.195676;
+
+INSERT INTO t1 ( e ) VALUES ( 1 ), ( 2 );
+
+INSERT INTO t2 ( f ) VALUES ( 1 ), ( 2 );
+
+SELECT * FROM t1;
+SELECT * FROM t2;
+
+--echo # 1980-12-13 02:02:01 UTC
+SET TIMESTAMP = 345520921.196755;
+
+UPDATE t1, t2 SET t1.e = 3, t2.f = 4;
+
+SELECT * FROM t1;
+SELECT * FROM t2;
+
+DROP TABLE t1, t2;
+
+--echo #
+--echo # Test of multiple table update, with a temporary table and on the fly.
+--echo #
+eval CREATE TABLE t1 (
+ a $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+ b $datetime ON UPDATE $current_timestamp,
+ c INT,
+ d INT
+);
+
+eval CREATE TABLE t2 (
+ a $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+ b $datetime ON UPDATE $current_timestamp,
+ c INT KEY,
+ d INT
+);
+
+INSERT INTO t1 ( c ) VALUES (1), (2);
+INSERT INTO t2 ( c ) VALUES (1), (2);
+
+--echo # Test of multiple table update done on the fly
+--echo # 2011-04-20 15:06:13 UTC
+SET TIMESTAMP = 1303311973.194685;
+UPDATE t1 JOIN t2 USING ( c ) SET t2.d = 1;
+SELECT * FROM t1;
+SELECT * FROM t2;
+
+--echo # Test of multiple table update done with temporary table.
+--echo # 1979-01-15 03:02:01
+SET TIMESTAMP = 285213721.134679;
+UPDATE t1 JOIN t2 USING ( c ) SET t1.d = 1;
+SELECT * FROM t1;
+SELECT * FROM t2;
+
+DROP TABLE t1, t2;
+
+
+--echo #
+--echo # Test of ON UPDATE CURRENT_TIMESTAMP.
+--echo #
+eval CREATE TABLE t1 (
+ a $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+ b $datetime ON UPDATE $current_timestamp,
+ c INT
+);
+
+--echo # 2011-04-20 09:53:41 UTC
+SET TIMESTAMP = 1303293221.794613;
+
+INSERT INTO t1 ( c ) VALUES ( 1 );
+SELECT * FROM t1;
+
+UPDATE t1 SET c = 1;
+SELECT * FROM t1;
+
+UPDATE t1 SET c = 2;
+SELECT * FROM t1;
+
+--echo #
+--echo # Test of multiple-table UPDATE for ON UPDATE CURRENT_TIMESTAMP
+--echo #
+--echo # 2011-04-20 15:06:13 UTC
+SET TIMESTAMP = 1303311973.534231;
+
+UPDATE t1 t11, t1 t12 SET t11.c = 2;
+SELECT * FROM t1;
+
+UPDATE t1 t11, t1 t12 SET t11.c = 3;
+SELECT * FROM t1;
+
+DROP TABLE t1;
+
+--echo #
+--echo # Test of a multiple-table update where only one table is updated and
+--echo # the updated table has a primary key.
+--echo #
+eval CREATE TABLE t1 ( a INT, b INT, PRIMARY KEY (a) );
+INSERT INTO t1 VALUES (1, 1),(2, 2),(3, 3),(4, 4);
+
+eval CREATE TABLE t2 ( a INT, b INT );
+INSERT INTO t2 VALUES (1, 1),(2, 2),(3, 3),(4, 4),(5, 5);
+
+UPDATE t1, t2 SET t1.b = 100 WHERE t1.a = t2.a;
+
+SELECT * FROM t1;
+SELECT * FROM t2;
+
+DROP TABLE t1, t2;
+
+--echo #
+--echo # Test of ALTER TABLE, reordering columns.
+--echo #
+eval CREATE TABLE t1 ( a $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp, b INT );
+eval ALTER TABLE t1 MODIFY a $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp AFTER b;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 ( a INT, b $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp, c $timestamp NULL );
+eval ALTER TABLE t1 MODIFY b $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp FIRST;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 ( a INT, b $timestamp NULL );
+eval ALTER TABLE t1 MODIFY b $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp FIRST;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 ( a $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp, b $timestamp NULL );
+eval ALTER TABLE t1 MODIFY a $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' AFTER b;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 ( a $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp, b $timestamp NULL );
+eval ALTER TABLE t1 MODIFY a $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' AFTER b;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 ( a $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $now, b INT, c $timestamp NULL );
+SHOW CREATE TABLE t1;
+eval ALTER TABLE t1 MODIFY a $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp AFTER b;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 ( a $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $now, b INT, c $timestamp NULL );
+eval ALTER TABLE t1 MODIFY c $timestamp NULL FIRST;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 ( a $timestamp NOT NULL DEFAULT $now ON UPDATE $current_timestamp, b INT, c $timestamp NULL );
+SHOW CREATE TABLE t1;
+eval ALTER TABLE t1 MODIFY a $timestamp NOT NULL DEFAULT $now ON UPDATE $current_timestamp AFTER b;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 ( a $timestamp NOT NULL DEFAULT $now ON UPDATE $current_timestamp, b INT, c $timestamp NULL );
+eval ALTER TABLE t1 MODIFY c $timestamp NULL FIRST;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+
+--echo #
+--echo # Test of ALTER TABLE, adding columns.
+--echo #
+eval CREATE TABLE t1 ( a INT );
+eval ALTER TABLE t1 ADD COLUMN b $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Test of INSERT SELECT.
+--echo #
+eval CREATE TABLE t1 (
+ a $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ b $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ c $datetime DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ d $datetime DEFAULT $current_timestamp ON UPDATE $current_timestamp
+);
+
+eval CREATE TABLE t2 (
+ placeholder1 INT,
+ placeholder2 INT,
+ placeholder3 INT,
+ placeholder4 INT,
+ a $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ b $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
+ c $datetime,
+ d $datetime
+);
+
+--echo # 1977-08-16 14:30:01 UTC
+SET TIMESTAMP = 240589801.654312;
+
+INSERT INTO t2 (a, b, c, d) VALUES (
+ '1977-08-16 15:30:01.123456',
+ '1977-08-16 15:30:01.234567',
+ '1977-08-16 15:30:01.345678',
+ '1977-08-16 15:30:01.456789'
+);
+
+--echo # 1986-09-27 01:00:00 UTC
+SET TIMESTAMP = 528166800.132435;
+
+INSERT INTO t1 ( a, c ) SELECT a, c FROM t2;
+
+SELECT * FROM t1;
+
+DROP TABLE t1, t2;
+
+--echo #
+--echo # Test of CREATE TABLE SELECT.
+--echo #
+--echo # We test that the columns of the source table are not used to determine
+--echo # function defaults for the receiving table.
+--echo #
+
+--echo # 1970-04-11 20:13:57 UTC
+SET TIMESTAMP = 8712837.657898;
+eval CREATE TABLE t1 (
+ a $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ b $timestamp NOT NULL DEFAULT $current_timestamp,
+ c $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+ d $timestamp NOT NULL DEFAULT '1986-09-27 03:00:00.098765',
+ e $timestamp NULL,
+ f $datetime DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ g $datetime DEFAULT $current_timestamp,
+ h $datetime ON UPDATE $current_timestamp,
+ i $datetime NULL,
+ j $datetime DEFAULT '1986-09-27 03:00:00.098765'
+);
+
+INSERT INTO t1 VALUES ();
+
+--echo # 1971-01-31 20:13:57 UTC
+SET TIMESTAMP = 34200837.164937;
+
+eval CREATE TABLE t2 SELECT a FROM t1;
+SHOW CREATE TABLE t2;
+SELECT * FROM t2;
+eval CREATE TABLE t3 SELECT b FROM t1;
+SHOW CREATE TABLE t3;
+SELECT * FROM t3;
+eval CREATE TABLE t4 SELECT c FROM t1;
+SHOW CREATE TABLE t4;
+SELECT * FROM t4;
+eval CREATE TABLE t5 SELECT d FROM t1;
+SHOW CREATE TABLE t5;
+SELECT * FROM t5;
+eval CREATE TABLE t6 SELECT e FROM t1;
+SHOW CREATE TABLE t6;
+SELECT * FROM t6;
+eval CREATE TABLE t7 SELECT f FROM t1;
+SHOW CREATE TABLE t7;
+SELECT * FROM t7;
+eval CREATE TABLE t8 SELECT g FROM t1;
+SHOW CREATE TABLE t8;
+SELECT * FROM t8;
+eval CREATE TABLE t9 SELECT h FROM t1;
+SHOW CREATE TABLE t9;
+SELECT * FROM t9;
+eval CREATE TABLE t10 SELECT i FROM t1;
+SHOW CREATE TABLE t10;
+SELECT * FROM t10;
+eval CREATE TABLE t11 SELECT j FROM t1;
+SHOW CREATE TABLE t11;
+SELECT * FROM t11;
+
+eval CREATE TABLE t12 (
+ k $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ l $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ m $timestamp NOT NULL DEFAULT $current_timestamp,
+ n $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+ o $timestamp NOT NULL DEFAULT '1986-09-27 03:00:00.098765',
+ p $timestamp NULL,
+ q $datetime DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ r $datetime DEFAULT $current_timestamp,
+ s $datetime ON UPDATE $current_timestamp,
+ t $datetime NULL,
+ u $datetime DEFAULT '1986-09-27 03:00:00.098765'
+)
+SELECT * FROM t1;
+
+SHOW CREATE TABLE t12;
+
+DROP TABLE t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
+
+--echo # 1970-04-11 20:13:57 UTC
+SET TIMESTAMP = 8712837.164953;
+eval CREATE TABLE t1 (
+ a $datetime DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ b $datetime DEFAULT $current_timestamp,
+ c $datetime ON UPDATE $current_timestamp,
+ d $datetime NULL,
+ e $datetime DEFAULT '1986-09-27 03:00:00.098765'
+);
+
+INSERT INTO t1 VALUES ();
+
+--echo # 1971-01-31 20:13:57 UTC
+SET TIMESTAMP = 34200837.915736;
+
+eval CREATE TABLE t2 SELECT a FROM t1;
+SHOW CREATE TABLE t2;
+SELECT * FROM t2;
+
+eval CREATE TABLE t3 SELECT b FROM t1;
+SHOW CREATE TABLE t3;
+SELECT * FROM t3;
+
+eval CREATE TABLE t4 SELECT c FROM t1;
+SHOW CREATE TABLE t4;
+SELECT * FROM t4;
+
+eval CREATE TABLE t5 SELECT d FROM t1;
+SHOW CREATE TABLE t5;
+SELECT * FROM t5;
+
+eval CREATE TABLE t6 SELECT e FROM t1;
+SHOW CREATE TABLE t6;
+SELECT * FROM t6;
+
+DROP TABLE t1, t2, t3, t4, t5, t6;
+
+--echo #
+--echo # Test of a CREATE TABLE SELECT that also declares columns. In this case
+--echo # the function default should be deactivated during the execution of the
+--echo # CREATE TABLE statement.
+--echo #
+--echo # 1970-01-01 03:16:40
+SET TIMESTAMP = 1000.987654;
+eval CREATE TABLE t1 ( a INT );
+INSERT INTO t1 VALUES ( 1 ), ( 2 );
+
+eval CREATE TABLE t2 ( b $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp) SELECT a FROM t1;
+
+SHOW CREATE TABLE t2;
+SET TIMESTAMP = 2000.876543;
+INSERT INTO t2( a ) VALUES ( 3 );
+SELECT * FROM t2;
+
+DROP TABLE t1, t2;
+
+--echo #
+--echo # Test of updating a view.
+--echo #
+eval CREATE TABLE t1 ( a INT, b $datetime DEFAULT $current_timestamp );
+eval CREATE TABLE t2 ( a INT, b $datetime ON UPDATE $current_timestamp );
+
+eval CREATE VIEW v1 AS SELECT * FROM t1;
+SHOW CREATE VIEW v1;
+
+eval CREATE VIEW v2 AS SELECT * FROM t2;
+SHOW CREATE VIEW v2;
+
+--echo # 1971-01-31 20:13:57 UTC
+SET TIMESTAMP = 34200837.348564;
+
+INSERT INTO v1 ( a ) VALUES ( 1 );
+INSERT INTO v2 ( a ) VALUES ( 1 );
+
+SELECT * FROM t1;
+SELECT * FROM v1;
+
+SELECT * FROM t2;
+SELECT * FROM v2;
+
+--echo # 1970-04-11 20:13:57 UTC
+SET TIMESTAMP = 8712837.567332;
+UPDATE v1 SET a = 2;
+UPDATE v2 SET a = 2;
+
+SELECT * FROM t1;
+SELECT * FROM v1;
+
+SELECT * FROM t2;
+SELECT * FROM v2;
+
+DROP VIEW v1, v2;
+DROP TABLE t1, t2;
+
+--echo #
+--echo # Test with stored procedures.
+--echo #
+eval CREATE TABLE t1 (
+ a INT,
+ b $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ c $timestamp NOT NULL DEFAULT $current_timestamp,
+ d $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+ e $timestamp NULL,
+ f $datetime DEFAULT $current_timestamp,
+ g $datetime ON UPDATE $current_timestamp
+);
+CREATE PROCEDURE p1() INSERT INTO test.t1( a ) VALUES ( 1 );
+CREATE PROCEDURE p2() UPDATE t1 SET a = 2 WHERE a = 1;
+
+--echo # 1971-01-31 20:13:57 UTC
+SET TIMESTAMP = 34200837.876544;
+CALL p1();
+SELECT * FROM t1;
+
+--echo # 1970-04-11 20:13:57 UTC
+SET TIMESTAMP = 8712837.143546;
+CALL p2();
+SELECT * FROM t1;
+
+DROP PROCEDURE p1;
+DROP PROCEDURE p2;
+DROP TABLE t1;
+
+--echo #
+--echo # Test with triggers.
+--echo #
+eval CREATE TABLE t1 (
+ a INT,
+ b $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ c $timestamp NOT NULL DEFAULT $current_timestamp,
+ d $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+ e $timestamp NULL,
+ f $datetime,
+ g $datetime DEFAULT $current_timestamp,
+ h $datetime ON UPDATE $current_timestamp,
+ i $datetime DEFAULT $current_timestamp ON UPDATE $current_timestamp
+);
+
+eval CREATE TABLE t2 ( a INT );
+
+DELIMITER |;
+eval CREATE TRIGGER t2_trg BEFORE INSERT ON t2 FOR EACH ROW
+BEGIN
+ INSERT INTO t1 ( a ) VALUES ( 1 );
+END|
+DELIMITER ;|
+
+--echo # 1971-01-31 20:13:57 UTC
+SET TIMESTAMP = 34200837.978675;
+
+INSERT INTO t2 ( a ) VALUES ( 1 );
+SELECT * FROM t1;
+
+DROP TRIGGER t2_trg;
+
+DELIMITER |;
+eval CREATE TRIGGER t2_trg BEFORE INSERT ON t2 FOR EACH ROW
+BEGIN
+ UPDATE t1 SET a = 2;
+END|
+DELIMITER ;|
+
+--echo # 1970-04-11 20:13:57 UTC
+SET TIMESTAMP = 8712837.456789;
+
+INSERT INTO t2 ( a ) VALUES ( 1 );
+SELECT * FROM t1;
+
+DROP TABLE t1, t2;
+
+--echo #
+--echo # Test where the assignment target is not a column.
+--echo #
+eval CREATE TABLE t1 ( a $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp );
+eval CREATE TABLE t2 ( a $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp );
+eval CREATE TABLE t3 ( a $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp );
+eval CREATE TABLE t4 ( a $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp );
+
+eval CREATE VIEW v1 AS SELECT a COLLATE latin1_german1_ci AS b FROM t1;
+eval CREATE VIEW v2 ( b ) AS SELECT a COLLATE latin1_german1_ci FROM t2;
+eval CREATE VIEW v3 AS SELECT a COLLATE latin1_german1_ci AS b FROM t3;
+eval CREATE VIEW v4 ( b ) AS SELECT a COLLATE latin1_german1_ci FROM t4;
+
+INSERT INTO v1 ( b ) VALUES ( '2007-10-24 00:03:34.010203' );
+SELECT a FROM t1;
+
+INSERT INTO v2 ( b ) VALUES ( '2007-10-24 00:03:34.010203' );
+SELECT a FROM t2;
+
+INSERT INTO t3 VALUES ();
+UPDATE v3 SET b = '2007-10-24 00:03:34.010203';
+SELECT a FROM t3;
+
+INSERT INTO t4 VALUES ();
+UPDATE v4 SET b = '2007-10-24 00:03:34.010203';
+SELECT a FROM t4;
+
+DROP VIEW v1, v2, v3, v4;
+DROP TABLE t1, t2, t3, t4;
+
+--echo #
+--echo # Test of LOAD DATA/XML INFILE
+--echo # This tests the behavior of function defaults for TIMESTAMP and DATETIME
+--echo # columns during LOAD ... INFILE.
+--echo # As can be seen here, a TIMESTAMP column with only ON UPDATE
+--echo # CURRENT_TIMESTAMP will still have CURRENT_TIMESTAMP inserted on LOAD
+--echo # ... INFILE if the value is missing. For DATETIME columns a NULL value
+--echo # is inserted instead.
+--echo #
+
+eval CREATE TABLE t1 (
+ a INT,
+ b $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ c $timestamp NOT NULL DEFAULT $current_timestamp,
+ d $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+ e $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ f $datetime,
+ g $datetime DEFAULT $current_timestamp,
+ h $datetime ON UPDATE $current_timestamp,
+ i $datetime DEFAULT $current_timestamp ON UPDATE $current_timestamp
+);
+
+eval CREATE TABLE t2 (
+ a $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ b $timestamp NOT NULL DEFAULT $current_timestamp,
+ c $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $current_timestamp,
+ d $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ e $datetime NOT NULL,
+ f $datetime NOT NULL DEFAULT '1977-01-02 12:13:14',
+ g $datetime DEFAULT $current_timestamp NOT NULL,
+ h $datetime ON UPDATE $current_timestamp NOT NULL,
+ i $datetime DEFAULT $current_timestamp ON UPDATE $current_timestamp NOT NULL
+);
+
+SELECT 1 INTO OUTFILE 't3.dat' FROM dual;
+
+SELECT NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
+INTO OUTFILE 't4.dat'
+FROM dual;
+
+SELECT 1, 2 INTO OUTFILE 't5.dat' FROM dual;
+
+--echo # Mon Aug 1 15:11:19 2011 UTC
+SET TIMESTAMP = 1312211479.918273;
+
+LOAD DATA INFILE 't3.dat' INTO TABLE t1;
+--query_vertical SELECT * FROM t1
+
+LOAD DATA INFILE 't4.dat' INTO TABLE t2;
+SELECT a FROM t2;
+SELECT b FROM t2;
+SELECT c FROM t2;
+SELECT d FROM t2;
+--echo # As shown here, supplying a NULL value to a non-nullable
+--echo # column with no default value results in the zero date.
+SELECT e FROM t2;
+--echo # As shown here, supplying a NULL value to a non-nullable column with a
+--echo # default value results in the zero date.
+SELECT f FROM t2;
+--echo # As shown here, supplying a NULL value to a non-nullable column with a
+--echo # default function results in the zero date.
+SELECT g FROM t2;
+--echo # As shown here, supplying a NULL value to a non-nullable DATETIME ON
+--echo # UPDATE CURRENT_TIMESTAMP column with no default value results in the
+--echo # zero date.
+SELECT h FROM t2;
+SELECT i FROM t2;
+
+DELETE FROM t1;
+DELETE FROM t2;
+
+--echo # Read t3 file into t1
+--echo # The syntax will cause a different code path to be taken
+--echo # (read_fixed_length()) than under the LOAD ... INTO TABLE t1 command
+--echo # above. The code in this path is copy-pasted from the path taken
+--echo # under the syntax used in the previous LOAD command.
+LOAD DATA INFILE 't3.dat' INTO TABLE t1
+FIELDS TERMINATED BY '' ENCLOSED BY '';
+
+SELECT b FROM t1;
+SELECT c FROM t1;
+SELECT d FROM t1;
+SELECT e FROM t1;
+--echo # Yes, a missing field cannot be NULL using this syntax, so it gets the
+--echo # zero date instead. A comment in read_fixed_length() says: "No fields
+--echo # specified in fields_vars list can be NULL in this format."
+--echo # It appears to be by design. This is inconsistent with the LOAD DATA
+--echo # INFILE syntax in the previous test.
+SELECT f FROM t1;
+SELECT g FROM t1;
+--echo # See comment above "SELECT f FROM t1".
+SELECT h FROM t1;
+SELECT i FROM t1;
+DELETE FROM t1;
+
+LOAD DATA INFILE 't5.dat' INTO TABLE t1 ( a, @dummy );
+SELECT * FROM t1;
+SELECT @dummy;
+DELETE FROM t1;
+
+LOAD DATA INFILE 't3.dat' INTO TABLE t1 ( a ) SET c = '2005-06-06 08:09:10';
+SELECT * FROM t1;
+DELETE FROM t1;
+
+LOAD DATA INFILE 't3.dat' INTO TABLE t1 ( a ) SET g = '2005-06-06 08:09:10';
+SELECT * FROM t1;
+DELETE FROM t1;
+
+--echo # Load a static XML file
+LOAD XML INFILE '../../std_data/onerow.xml' INTO TABLE t1
+ROWS IDENTIFIED BY '<row>';
+
+--echo Missing tags are treated as NULL
+--query_vertical SELECT * FROM t1
+
+DROP TABLE t1, t2;
+
+let $MYSQLD_DATADIR= `select @@datadir`;
+remove_file $MYSQLD_DATADIR/test/t3.dat;
+remove_file $MYSQLD_DATADIR/test/t4.dat;
+remove_file $MYSQLD_DATADIR/test/t5.dat;
+
+
+--echo #
+--echo # Similar LOAD DATA tests in another form
+--echo #
+--echo # All of this test portion has been run on a pre-WL5874 trunk
+--echo # (except that like_b and like_c didn't exist) and all result
+--echo # differences are a bug.
+--echo # Regarding like_b, its definition is the same as b's except
+--echo # that the constant default is replaced with a function
+--echo # default. Our expectation is that like_b would behave
+--echo # like b: if b is set to NULL, or set to 0000-00-00, or set to
+--echo # its default, then the same should apply to like_b. Same for
+--echo # like_c vs c.
+
+--echo # Mon Aug 1 15:11:19 2011 UTC
+SET TIMESTAMP = 1312211479.089786;
+
+SELECT 1 INTO OUTFILE "file1.dat" FROM dual;
+SELECT NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
+ INTO OUTFILE "file2.dat" FROM dual;
+
+--echo # Too short row
+
+eval
+CREATE TABLE t1 (
+ dummy INT,
+ a $datetime NULL DEFAULT NULL,
+ b $datetime NULL DEFAULT "2011-11-18",
+ like_b $datetime NULL DEFAULT $current_timestamp,
+ c $datetime NOT NULL DEFAULT "2011-11-18",
+ like_c $datetime NOT NULL DEFAULT $current_timestamp,
+ d $timestamp NULL DEFAULT "2011-05-03" ON UPDATE $current_timestamp,
+ e $timestamp NOT NULL DEFAULT "2011-05-03",
+ f $timestamp NOT NULL DEFAULT $current_timestamp,
+ g $timestamp NULL DEFAULT NULL,
+ h INT NULL,
+ i INT NOT NULL DEFAULT 42
+);
+
+--echo # There is no promotion
+SHOW CREATE TABLE t1;
+
+LOAD DATA INFILE "file1.dat" INTO table t1;
+
+--echo # It is strange that "like_b" gets NULL when "b" gets 0. But
+--echo # this is consistent with how "a" gets NULL when "b" gets 0,
+--echo # with how "g" gets NULL when "d" gets 0, and with how "h" gets
+--echo # NULL when "i" gets 0. Looks like "DEFAULT
+--echo # <non-NULL-constant>" is changed to 0, whereas DEFAULT NULL
+--echo # and DEFAULT NOW are changed to NULL.
+--query_vertical SELECT * FROM t1
+delete from t1;
+
+alter table t1
+modify f TIMESTAMP NULL default CURRENT_TIMESTAMP;
+
+--echo # There is no promotion
+SHOW CREATE TABLE t1;
+
+LOAD DATA INFILE "file1.dat" INTO table t1;
+
+--query_vertical SELECT * FROM t1
+delete from t1;
+
+drop table t1;
+
+--echo # Conclusion derived from trunk's results:
+--echo # DATETIME DEFAULT <non-NULL-constant> (b,c) gets 0000-00-00,
+--echo # DATETIME DEFAULT NULL (a) gets NULL,
+--echo # TIMESTAMP NULL DEFAULT <non-NULL-constant> (d) gets 0000-00-00,
+--echo # TIMESTAMP NULL DEFAULT NULL (g) gets NULL,
+--echo # TIMESTAMP NULL DEFAULT NOW (f after ALTER) gets NULL,
+--echo # TIMESTAMP NOT NULL (f before ALTER, e) gets NOW.
+
+--echo ### Loading NULL ###
+
+eval
+CREATE TABLE t1 (
+ dummy INT,
+ a $datetime NULL DEFAULT NULL,
+ b $datetime NULL DEFAULT "2011-11-18",
+ like_b $datetime NULL DEFAULT $current_timestamp,
+ c $datetime NOT NULL DEFAULT "2011-11-18",
+ like_c $datetime NOT NULL DEFAULT $current_timestamp,
+ d $timestamp NULL DEFAULT "2011-05-03" ON UPDATE $current_timestamp,
+ e $timestamp NOT NULL DEFAULT "2011-05-03",
+ f $timestamp NOT NULL DEFAULT $current_timestamp,
+ g $timestamp NULL DEFAULT NULL,
+ h INT NULL,
+ i INT NOT NULL DEFAULT 42
+);
+
+--echo # There is no promotion
+SHOW CREATE TABLE t1;
+
+LOAD DATA INFILE "file2.dat" INTO table t1;
+
+--query_vertical SELECT * FROM t1
+delete from t1;
+
+alter table t1
+modify f TIMESTAMP NULL default CURRENT_TIMESTAMP;
+
+--echo # There is no promotion
+SHOW CREATE TABLE t1;
+
+LOAD DATA INFILE "file2.dat" INTO table t1;
+
+--query_vertical SELECT * FROM t1
+delete from t1;
+
+--echo # Conclusion derived from trunk's results:
+--echo # DATETIME NULL (a,b) gets NULL,
+--echo # DATETIME NOT NULL (c) gets 0000-00-00,
+--echo # TIMESTAMP NULL (d,f,g) gets NULL,
+--echo # TIMESTAMP NOT NULL (e) gets NOW.
+
+drop table t1;
+remove_file $MYSQLD_DATADIR/test/file1.dat;
+remove_file $MYSQLD_DATADIR/test/file2.dat;
+
+--echo #
+--echo # Test of updatable views with check options. The option can be violated
+--echo # by ON UPDATE updates, which is strange, as it opens a loophole
+--echo # in the integrity check.
+--echo #
+SET TIME_ZONE = "+03:00";
+--echo # 1970-01-01 03:16:40
+SET TIMESTAMP = 1000.123456;
+
+eval CREATE TABLE t1 ( a INT, b $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp) ENGINE = INNODB;
+
+SHOW CREATE TABLE t1;
+
+INSERT INTO t1 ( a ) VALUES ( 1 );
+
+SELECT * FROM t1;
+
+eval CREATE VIEW v1 AS SELECT * FROM t1 WHERE b <= '1970-01-01 03:16:40.123456'
+WITH CHECK OPTION;
+
+SELECT * FROM v1;
+
+--echo # 1970-01-01 03:33:20
+SET TIMESTAMP = 2000.000234;
+
+--error ER_VIEW_CHECK_FAILED
+UPDATE v1 SET a = 2;
+SELECT * FROM t1;
+
+DROP VIEW v1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (
+ a $timestamp NOT NULL DEFAULT '1973-08-14 09:11:22.089786' ON UPDATE $current_timestamp,
+ c INT KEY
+);
+--echo # 1973-08-14 09:11:22 UTC
+SET TIMESTAMP = 114167482.534231;
+INSERT INTO t1 ( c ) VALUES ( 1 );
+
+eval CREATE VIEW v1 AS
+SELECT *
+FROM t1
+WHERE a >= '1973-08-14 09:11:22'
+WITH LOCAL CHECK OPTION;
+
+SELECT * FROM v1;
+
+SET TIMESTAMP = 1.126789;
+
+--error ER_VIEW_CHECK_FAILED
+INSERT INTO v1 ( c ) VALUES ( 1 ) ON DUPLICATE KEY UPDATE c = 2;
+
+SELECT * FROM v1;
+
+DROP VIEW v1;
+DROP TABLE t1;
+
+--echo #
+--echo # Bug 13095459 - MULTI-TABLE UPDATE MODIFIES A ROW TWICE
+--echo #
+eval CREATE TABLE t1 (
+ a INT,
+ b INT,
+ ts $timestamp NOT NULL DEFAULT $current_timestamp ON UPDATE $current_timestamp,
+ PRIMARY KEY ( a, ts )
+) ENGINE = INNODB;
+INSERT INTO t1( a, b, ts ) VALUES ( 1, 0, '2000-09-28 17:44:34' );
+
+eval CREATE TABLE t2 ( a INT ) ENGINE = INNODB;
+INSERT INTO t2 VALUES ( 1 );
+
+UPDATE t1 STRAIGHT_JOIN t2
+SET t1.b = t1.b + 1
+WHERE t1.a = 1 AND t1.ts >= '2000-09-28 00:00:00';
+
+SELECT b FROM t1;
+
+DROP TABLE t1, t2;
+
+--echo #
+--echo # Bug#11745578: 17392: ALTER TABLE ADD COLUMN TIMESTAMP DEFAULT
+--echo # CURRENT_TIMESTAMP INSERTS ZERO
+--echo #
+SET timestamp = 1000;
+
+CREATE TABLE t1 ( b INT );
+INSERT INTO t1 VALUES (1);
+
+eval ALTER TABLE t1 ADD COLUMN a6 $datetime DEFAULT $now ON UPDATE $now FIRST;
+eval ALTER TABLE t1 ADD COLUMN a5 $datetime DEFAULT $now FIRST;
+eval ALTER TABLE t1 ADD COLUMN a4 $datetime ON UPDATE $now FIRST;
+
+eval ALTER TABLE t1 ADD COLUMN a3 $timestamp NOT NULL DEFAULT $now ON UPDATE $now FIRST;
+eval ALTER TABLE t1 ADD COLUMN a2 $timestamp NOT NULL DEFAULT $now FIRST;
+eval ALTER TABLE t1 ADD COLUMN a1 $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $now FIRST;
+
+eval ALTER TABLE t1 ADD COLUMN c1 $timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE $now AFTER b;
+eval ALTER TABLE t1 ADD COLUMN c2 $timestamp NOT NULL DEFAULT $now AFTER c1;
+eval ALTER TABLE t1 ADD COLUMN c3 $timestamp NOT NULL DEFAULT $now ON UPDATE $now AFTER c2;
+
+eval ALTER TABLE t1 ADD COLUMN c4 $datetime ON UPDATE $now AFTER c3;
+eval ALTER TABLE t1 ADD COLUMN c5 $datetime DEFAULT $now AFTER c4;
+eval ALTER TABLE t1 ADD COLUMN c6 $datetime DEFAULT $now ON UPDATE $now AFTER c5;
+
+SELECT * FROM t1;
+
+DROP TABLE t1;
+
+
+eval CREATE TABLE t1 ( a $timestamp NOT NULL DEFAULT $now ON UPDATE $current_timestamp, b $datetime DEFAULT $now );
+INSERT INTO t1 VALUES ();
+
+SET timestamp = 1000000000;
+
+ALTER TABLE t1 MODIFY COLUMN a TIMESTAMP(3) DEFAULT CURRENT_TIMESTAMP(3);
+ALTER TABLE t1 MODIFY COLUMN b DATETIME(3) DEFAULT CURRENT_TIMESTAMP(3);
+
+SELECT * FROM t1;
+
+DROP TABLE t1;
+
+
+eval CREATE TABLE t1 (
+ a $timestamp NOT NULL DEFAULT '1999-12-01 11:22:33' ON UPDATE $current_timestamp,
+ b $datetime DEFAULT '1999-12-01 11:22:33'
+);
+INSERT INTO t1 VALUES ();
+
+eval ALTER TABLE t1 MODIFY COLUMN a $timestamp DEFAULT $now;
+eval ALTER TABLE t1 MODIFY COLUMN b $datetime DEFAULT $now;
+INSERT INTO t1 VALUES ();
+
+SELECT * FROM t1;
+
+DROP TABLE t1;
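Note that function_defaults.inc is a parameterized include: $timestamp, $datetime, $current_timestamp and $now must be set by a wrapper test before sourcing it, which lets the same file exercise both second- and microsecond-precision column types. A minimal driver could look like this sketch (the variable values shown are assumptions for illustration, not part of this patch):

    # Hypothetical wrapper, e.g. for a microsecond-precision variant:
    let $timestamp= TIMESTAMP(6);
    let $datetime= DATETIME(6);
    let $current_timestamp= CURRENT_TIMESTAMP(6);
    let $now= NOW(6);
    --source include/function_defaults.inc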
diff --git a/mysql-test/include/function_defaults_notembedded.inc b/mysql-test/include/function_defaults_notembedded.inc
new file mode 100644
index 00000000000..d9708c13da5
--- /dev/null
+++ b/mysql-test/include/function_defaults_notembedded.inc
@@ -0,0 +1,94 @@
+SET TIME_ZONE = "+00:00";
+
+--echo #
+--echo # Test of INSERT DELAYED ... SET ...
+--echo #
+
+--echo # 2011-04-19 08:02:40 UTC
+SET TIMESTAMP = 1303200160.123456;
+
+eval CREATE TABLE t1 ( a INT, b $timestamp NOT NULL DEFAULT CURRENT_$timestamp ON UPDATE CURRENT_$timestamp);
+
+INSERT DELAYED INTO t1 SET a = 1;
+FLUSH TABLE t1;
+
+SELECT * FROM t1;
+SELECT * FROM t1 WHERE b = 0;
+
+INSERT DELAYED INTO t1 SET a = 2, b = '1980-01-02 10:20:30.405060';
+FLUSH TABLE t1;
+
+SELECT * FROM t1;
+
+DROP TABLE t1;
+
+--echo #
+--echo # Test of INSERT DELAYED ... VALUES ...
+--echo #
+
+--echo # 2011-04-19 08:04:01 UTC
+SET TIMESTAMP = 1303200241.234567;
+
+eval CREATE TABLE t1 ( a INT, b $timestamp NOT NULL DEFAULT CURRENT_$timestamp ON UPDATE CURRENT_$timestamp);
+
+INSERT DELAYED INTO t1 ( a ) VALUES (1);
+FLUSH TABLE t1;
+SELECT * FROM t1;
+
+INSERT DELAYED INTO t1 VALUES (2, '1977-12-19 12:34:56.789123');
+FLUSH TABLE t1;
+SELECT * FROM t1;
+
+DROP TABLE t1;
+
+--echo #
+--echo # Test of a delayed insert handler servicing two insert operations
+--echo # with different sets of active defaults.
+--echo #
+eval CREATE TABLE t1 ( a INT, b $timestamp NOT NULL DEFAULT CURRENT_$timestamp ON UPDATE CURRENT_$timestamp);
+
+--connect(con1, localhost, root,,)
+--echo # 2011-04-19 08:04:01 UTC
+SET TIMESTAMP = 1303200241.345678;
+SET debug_sync = 'before_write_delayed SIGNAL parked WAIT_FOR go';
+--send INSERT DELAYED INTO t1 ( a ) VALUES (1), (2), (3)
+
+--connection default
+SET debug_sync = 'now WAIT_FOR parked';
+
+--connect(con2, localhost, root,,)
+--echo # 2011-04-19 08:04:01 UTC
+SET TIME_ZONE="+03:00";
+SET TIMESTAMP = 1303200241.456789;
+--send INSERT DELAYED INTO t1 ( a, b ) VALUES (4, '1977-12-19 12:34:56.789123'), (5, '1977-12-19 12:34:57.891234'), (6, '1977-12-19 12:34:58.912345')
+
+--connection default
+SET debug_sync = 'now SIGNAL go';
+
+--let $wait_condition= SELECT COUNT(*) = 6 FROM t1
+--source include/wait_condition.inc
+
+--sorted_result
+SELECT * FROM t1;
+
+--disconnect con1
+--disconnect con2
+
+DROP TABLE t1;
+
+--echo #
+--echo # Test of early activation of function defaults.
+--echo #
+
+eval CREATE TABLE t1 ( a INT, b $timestamp NOT NULL DEFAULT CURRENT_$timestamp ON UPDATE CURRENT_$timestamp);
+
+SET TIMESTAMP = 1317235172.987654; # 2011-09-28 18:39:32 UTC
+INSERT DELAYED INTO t1 ( a ) VALUES (1), (2), (3);
+
+SET TIMESTAMP = 385503754.876543; # 1982-03-20 20:22:34 UTC
+INSERT DELAYED INTO t1 ( a ) VALUES (4), (5), (6);
+
+FLUSH TABLE t1;
+SELECT * FROM t1;
+
+DROP TABLE t1;
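This include uses the same parameterization, with one twist: the defaults are spelled CURRENT_$timestamp, so a wrapper that sets $timestamp to TIMESTAMP produces CURRENT_TIMESTAMP, while TIMESTAMP(6) produces CURRENT_TIMESTAMP(6), keeping the default's precision in lockstep with the column type. A sketch (values assumed for illustration):

    # Hypothetical wrapper for a microsecond-precision variant:
    let $timestamp= TIMESTAMP(6);
    # ...so CURRENT_$timestamp above expands to CURRENT_TIMESTAMP(6).
    --source include/function_defaults_notembedded.inc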
diff --git a/mysql-test/include/have_cassandra.inc b/mysql-test/include/have_cassandra.inc
new file mode 100644
index 00000000000..d358e2ecc26
--- /dev/null
+++ b/mysql-test/include/have_cassandra.inc
@@ -0,0 +1,10 @@
+#
+# suite.pm will make sure that all tests that include this file
+# will be skipped unless the cassandra engine is enabled
+#
+# The test below is redundant
+
+if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'cassandra' AND support IN ('YES', 'DEFAULT', 'ENABLED')`)
+{
+ --skip Test requires Cassandra.
+}
diff --git a/mysql-test/include/have_cassandra.opt b/mysql-test/include/have_cassandra.opt
new file mode 100644
index 00000000000..98a4a081de5
--- /dev/null
+++ b/mysql-test/include/have_cassandra.opt
@@ -0,0 +1 @@
+--plugin-load=$HA_CASSANDRA_SO --loose-cassandra=on
diff --git a/mysql-test/include/have_dbi_dbd-mysql.inc b/mysql-test/include/have_dbi_dbd-mysql.inc
index 212e36ac353..7c2113a8109 100644
--- a/mysql-test/include/have_dbi_dbd-mysql.inc
+++ b/mysql-test/include/have_dbi_dbd-mysql.inc
@@ -58,9 +58,7 @@
--let $perlChecker= $MYSQLTEST_VARDIR/std_data/checkDBI_DBD-mysql.pl
--let $resultFile= $MYSQL_TMP_DIR/dbidbd-mysql.txt
-# Make the script executable and execute it.
---chmod 0755 $perlChecker
---exec $perlChecker
+--exec perl $perlChecker
# Source the resulting temporary file and look for a variable being set.
--source $resultFile
diff --git a/mysql-test/include/have_debug_sync.inc b/mysql-test/include/have_debug_sync.inc
index 7aa5baf3342..8efec7dad95 100644
--- a/mysql-test/include/have_debug_sync.inc
+++ b/mysql-test/include/have_debug_sync.inc
@@ -1,5 +1,4 @@
---require r/have_debug_sync.require
-disable_query_log;
-let $value= query_get_value(SHOW VARIABLES LIKE 'debug_sync', Value, 1);
-eval SELECT ('$value' LIKE 'ON %') AS debug_sync;
-enable_query_log;
+if (`SELECT COUNT(*) = 0 FROM information_schema.session_variables WHERE
+ variable_name = 'debug_sync' AND variable_value LIKE 'ON %'`) {
+ --skip Needs debug_sync enabled
+}
diff --git a/mysql-test/include/have_stat_tables.inc b/mysql-test/include/have_stat_tables.inc
new file mode 100644
index 00000000000..a52a4d5ca5c
--- /dev/null
+++ b/mysql-test/include/have_stat_tables.inc
@@ -0,0 +1,5 @@
+if (`select count(*) < 3 from information_schema.tables
+ where table_schema = 'mysql' and table_name in ('table_stats','column_stats','index_stats')`)
+{
+ --skip Needs stat tables
+}
diff --git a/mysql-test/include/have_stat_tables.opt b/mysql-test/include/have_stat_tables.opt
new file mode 100644
index 00000000000..addda71619d
--- /dev/null
+++ b/mysql-test/include/have_stat_tables.opt
@@ -0,0 +1 @@
+--use-stat-tables='complementary'
diff --git a/mysql-test/include/mtr_warnings.sql b/mysql-test/include/mtr_warnings.sql
index 37a17a8133e..d957e33d41c 100644
--- a/mysql-test/include/mtr_warnings.sql
+++ b/mysql-test/include/mtr_warnings.sql
@@ -224,6 +224,8 @@ INSERT INTO global_suppressions VALUES
("Slave I/O: The slave I/O thread stops because a fatal error is encountered when it tried to SET @master_binlog_checksum on master.*"),
("Slave I/O: Get master BINLOG_CHECKSUM failed with error.*"),
("Slave I/O: Notifying master by SET @master_binlog_checksum= @@global.binlog_checksum failed with error.*"),
+ ("Slave I/O: Setting master-side filtering of @@skip_replication failed with error:.*"),
+ ("Slave I/O: Setting @mariadb_slave_capability failed with error:.*"),
("THE_LAST_SUPPRESSION")||
diff --git a/mysql-test/include/not_openssl.inc b/mysql-test/include/not_openssl.inc
deleted file mode 100644
index afe2ed37c28..00000000000
--- a/mysql-test/include/not_openssl.inc
+++ /dev/null
@@ -1,4 +0,0 @@
--- require r/not_openssl.require
-disable_query_log;
-show variables like "have_openssl";
-enable_query_log;
diff --git a/mysql-test/include/not_ssl.inc b/mysql-test/include/not_ssl.inc
new file mode 100644
index 00000000000..99866923621
--- /dev/null
+++ b/mysql-test/include/not_ssl.inc
@@ -0,0 +1,4 @@
+if (`select @@have_ssl = 'YES'`)
+{
+ skip only without SSL;
+}
diff --git a/mysql-test/include/setup_fake_relay_log.inc b/mysql-test/include/setup_fake_relay_log.inc
index 4f1d4f6f162..a1964572427 100644
--- a/mysql-test/include/setup_fake_relay_log.inc
+++ b/mysql-test/include/setup_fake_relay_log.inc
@@ -74,7 +74,6 @@ let $_fake_relay_index= $_fake_datadir/$_fake_filename.index;
let $_fake_relay_log_purge= `SELECT @@global.relay_log_purge`;
RESET SLAVE;
-let $_orphan_relay_file= query_get_value(SHOW SLAVE STATUS, Relay_Log_File, 1);
# Create relay log file.
--copy_file $fake_relay_log $_fake_relay_log
@@ -103,8 +102,5 @@ let $_orphan_relay_file= query_get_value(SHOW SLAVE STATUS, Relay_Log_File, 1);
# Setup replication from existing relay log.
eval CHANGE MASTER TO MASTER_HOST='dummy.localdomain', RELAY_LOG_FILE='$_fake_filename-fake.000001', RELAY_LOG_POS=4;
-# remove the orphan log file (became spurious)
--- remove_file $_fake_datadir/$_orphan_relay_file
-
--let $include_filename= setup_fake_relay_log.inc
--source include/end_include_file.inc
diff --git a/mysql-test/include/show_binlog_events.inc b/mysql-test/include/show_binlog_events.inc
index e5670c054fa..5ab837f0e6b 100644
--- a/mysql-test/include/show_binlog_events.inc
+++ b/mysql-test/include/show_binlog_events.inc
@@ -9,7 +9,7 @@
#
# It shows the first binary log file if $binlog_file is not given.
#
-# It shows events from the end position of the description event if
+# It shows events from the end position of the binlog checkpoint event if
# $binlog_start is not given.
#
# It shows all of the events if $binlog_limit is not given.
diff --git a/mysql-test/include/show_binlog_events2.inc b/mysql-test/include/show_binlog_events2.inc
index c32d12537fd..1fd9ce96d53 100644
--- a/mysql-test/include/show_binlog_events2.inc
+++ b/mysql-test/include/show_binlog_events2.inc
@@ -1,4 +1,4 @@
---let $binlog_start=245
+--let $binlog_start=246
--replace_result $binlog_start <binlog_start>
--replace_column 2 # 5 #
--replace_regex /\/\* xid=.* \*\//\/* XID *\// /table_id: [0-9]+/table_id: #/
diff --git a/mysql-test/include/show_events.inc b/mysql-test/include/show_events.inc
index ff5a7105c24..878773d2b5b 100644
--- a/mysql-test/include/show_events.inc
+++ b/mysql-test/include/show_events.inc
@@ -3,13 +3,28 @@
# It is only called by show_binlog_events.inc and show_relaylog_events.inc.
##############################################################################
+# Do not modify $binlog_start - if we did, it could wrongly persist until a
+# later call of show_events.inc.
+if ($binlog_start)
+{
+ --let $_binlog_start= $binlog_start
+}
if (!$binlog_start)
{
- # If $binlog_start is not set, we will set it as the second event's
- # position. The first event(Description Event) is always ignored. For
- # description event's length might be changed because of adding new events,
- # 'SHOW BINLOG EVENTS LIMIT 1' is used to get the right value.
- --let $binlog_start= query_get_value(SHOW BINLOG EVENTS LIMIT 1, End_log_pos, 1)
+  # If $binlog_start is not set, we set it to the third event's position
+  # (the second in a relay log, which has no Binlog Checkpoint event).
+  # The first two events (the Description event and the Binlog Checkpoint
+  # event) are always ignored. Because the description event's length may
+  # change as new events are added, 'SHOW BINLOG EVENTS LIMIT 2' is used
+  # to get the right value.
+ if ($is_relay_log)
+ {
+ --let $_binlog_start= query_get_value(SHOW BINLOG EVENTS LIMIT 1, End_log_pos, 1)
+ }
+ if (!$is_relay_log)
+ {
+ --let $_binlog_start= query_get_value(SHOW BINLOG EVENTS LIMIT 2, End_log_pos, 2)
+ }
}
--let $_statement=show binlog events
@@ -23,7 +38,7 @@ if ($binlog_file)
--let $_statement= $_statement in '$binlog_file'
}
---let $_statement= $_statement from $binlog_start
+--let $_statement= $_statement from $_binlog_start
# Cannot use if($binlog_limit) since the variable may begin with a 0
@@ -32,7 +47,7 @@ if (`SELECT '$binlog_limit' <> ''`)
--let $_statement= $_statement limit $binlog_limit
}
---replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR $binlog_start <binlog_start>
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR $_binlog_start <binlog_start>
--replace_column 2 # 4 # 5 #
--replace_regex /\/\* xid=.* \*\//\/* XID *\// /table_id: [0-9]+/table_id: #/ /file_id=[0-9]+/file_id=#/ /block_len=[0-9]+/block_len=#/ /Server ver:.*$/SERVER_VERSION, BINLOG_VERSION/
--eval $_statement
diff --git a/mysql-test/include/system_db_struct.inc b/mysql-test/include/system_db_struct.inc
index 123c82484b9..9467c023dba 100644
--- a/mysql-test/include/system_db_struct.inc
+++ b/mysql-test/include/system_db_struct.inc
@@ -16,3 +16,6 @@ show create table proc;
show create table event;
show create table general_log;
show create table slow_log;
+show create table table_stats;
+show create table column_stats;
+show create table index_stats;
diff --git a/mysql-test/include/wait_for_binlog_checkpoint.inc b/mysql-test/include/wait_for_binlog_checkpoint.inc
new file mode 100644
index 00000000000..960cf4e41b1
--- /dev/null
+++ b/mysql-test/include/wait_for_binlog_checkpoint.inc
@@ -0,0 +1,53 @@
+# include/wait_for_binlog_checkpoint.inc
+#
+# SUMMARY
+#
+# Wait until a binlog checkpoint has been logged for the current binlog file.
+# This is useful to avoid races causing output differences for binlog
+# checkpoints, as these are logged asynchronously by the binlog
+# background thread.
+#
+# USAGE:
+#
+# --source include/wait_for_binlog_checkpoint.inc
+
+let $_wait_count= 300;
+
+let $_found= 0;
+
+while ($_wait_count)
+{
+ dec $_wait_count;
+ let $_cur_binlog= query_get_value(SHOW MASTER STATUS, File, 1);
+ let $_more= 1;
+ let $_row= 1;
+ while ($_more)
+ {
+ let $_event= query_get_value(SHOW BINLOG EVENTS IN "$_cur_binlog", Event_type, $_row);
+ if ($_event == "No such row")
+ {
+ let $_more= 0;
+ }
+ if ($_event == "Binlog_checkpoint")
+ {
+ let $_info= query_get_value(SHOW BINLOG EVENTS IN "$_cur_binlog", Info, $_row);
+ if (`SELECT INSTR("$_info", "$_cur_binlog") != 0`)
+ {
+ let $_more= 0;
+ let $_wait_count= 0;
+ let $_found= 1;
+ }
+ }
+ inc $_row;
+ }
+ if ($_wait_count)
+ {
+ real_sleep 0.1;
+ }
+}
+
+if (!$_found)
+{
+ eval SHOW BINLOG EVENTS IN "$_cur_binlog";
+ --die ERROR: failed while waiting for binlog checkpoint $_cur_binlog
+}
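A typical use is to source this include right after an action that triggers a checkpoint, such as a binlog rotation; the loop above then polls SHOW BINLOG EVENTS every 0.1 seconds and gives up after 300 attempts (about 30 seconds). A usage sketch (the triggering statement is illustrative):

    # Rotate the binlog, then wait for its checkpoint to be logged.
    FLUSH BINARY LOGS;
    --source include/wait_for_binlog_checkpoint.inc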
diff --git a/mysql-test/include/world_schema_utf8.inc b/mysql-test/include/world_schema_utf8.inc
new file mode 100644
index 00000000000..2a09c5d32d5
--- /dev/null
+++ b/mysql-test/include/world_schema_utf8.inc
@@ -0,0 +1,25 @@
+CREATE TABLE Country (
+ Code char(3) NOT NULL default '',
+ Name char(52) NOT NULL default '',
+ SurfaceArea float(10,2) NOT NULL default '0.00',
+ Population int(11) NOT NULL default '0',
+ Capital int(11) default NULL,
+ PRIMARY KEY (Code),
+ UNIQUE INDEX (Name)
+) CHARACTER SET utf8 COLLATE utf8_bin;
+CREATE TABLE City (
+ ID int(11) NOT NULL auto_increment,
+ Name char(35) NOT NULL default '',
+ Country char(3) NOT NULL default '',
+ Population int(11) NOT NULL default '0',
+ PRIMARY KEY (ID),
+ INDEX (Population),
+ INDEX (Country)
+) CHARACTER SET utf8 COLLATE utf8_bin;
+CREATE TABLE CountryLanguage (
+ Country char(3) NOT NULL default '',
+ Language char(30) NOT NULL default '',
+ Percentage float(3,1) NOT NULL default '0.0',
+ PRIMARY KEY (Country, Language),
+ INDEX (Percentage)
+) CHARACTER SET utf8 COLLATE utf8_bin;
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index e2331e2e454..ac911c5e4e8 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -174,6 +174,7 @@ my $DEFAULT_SUITES= join(',', map { "$_-" } qw(
heap
innodb
maria
+ multi_source
optimizer_unfixed_bugs
oqgraph
parts
@@ -3595,9 +3596,10 @@ sub mysql_install_db {
verbose => $opt_verbose,
) != 0)
{
+ my $data= mtr_grab_file($path_bootstrap_log);
mtr_error("Error executing mysqld --bootstrap\n" .
"Could not install system database from $bootstrap_sql_file\n" .
- "see $path_bootstrap_log for errors");
+ "The $path_bootstrap_log file contains:\n$data\n");
}
}
@@ -4733,7 +4735,8 @@ sub extract_warning_lines ($$) {
qr|Checking table: '\..mtr.test_suppressions'|,
qr|Table \./test/bug53592 has a primary key in InnoDB data dictionary, but not in MySQL|,
qr|Table '\..mtr.test_suppressions' is marked as crashed and should be repaired|,
- qr|Can't open shared library.*ha_archive|,
+ qr|Can't open shared library|,
+ qr|Couldn't load plugin named .*EXAMPLE.*|,
qr|InnoDB: Error: table 'test/bug39438'|,
qr| entry '.*' ignored in --skip-name-resolve mode|,
qr|mysqld got signal 6|,
diff --git a/mysql-test/r/1st.result b/mysql-test/r/1st.result
index 792d9eaf2f1..a948031ea2e 100644
--- a/mysql-test/r/1st.result
+++ b/mysql-test/r/1st.result
@@ -7,6 +7,7 @@ performance_schema
test
show tables in mysql;
Tables_in_mysql
+column_stats
columns_priv
db
event
@@ -17,6 +18,7 @@ help_keyword
help_relation
help_topic
host
+index_stats
ndb_binlog_index
plugin
proc
@@ -24,6 +26,7 @@ procs_priv
proxies_priv
servers
slow_log
+table_stats
tables_priv
time_zone
time_zone_leap_second
diff --git a/mysql-test/r/alter_table.result b/mysql-test/r/alter_table.result
index 624cc7afa98..b2cfd079ff5 100644
--- a/mysql-test/r/alter_table.result
+++ b/mysql-test/r/alter_table.result
@@ -176,12 +176,12 @@ create table t1 (a int, b int);
alter table t1 add unique (a,b), add key (b);
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-t1 0 a 1 a A NULL NULL NULL YES BTREE
-t1 0 a 2 b A NULL NULL NULL YES BTREE
+t1 0 a 1 a A 3 NULL NULL YES BTREE
+t1 0 a 2 b A 300 NULL NULL YES BTREE
t1 1 b 1 b A 100 NULL NULL YES BTREE
analyze table t1;
Table Op Msg_type Msg_text
-test.t1 analyze status OK
+test.t1 analyze status Table is already up to date
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 0 a 1 a A 3 NULL NULL YES BTREE
diff --git a/mysql-test/r/alter_table_mdev539_maria.result b/mysql-test/r/alter_table_mdev539_maria.result
new file mode 100644
index 00000000000..703908825d2
--- /dev/null
+++ b/mysql-test/r/alter_table_mdev539_maria.result
@@ -0,0 +1,252 @@
+#
+set @@storage_engine= Aria;
+#
+# mdev-539: fast build of unique/primary indexes for MyISAM/Aria
+#
+DROP DATABASE IF EXISTS dbt3_s001;
+CREATE DATABASE dbt3_s001;
+use dbt3_s001;
+drop index `primary` on lineitem;
+show create table lineitem;
+Table Create Table
+lineitem CREATE TABLE `lineitem` (
+ `l_orderkey` int(11) NOT NULL DEFAULT '0',
+ `l_partkey` int(11) DEFAULT NULL,
+ `l_suppkey` int(11) DEFAULT NULL,
+ `l_linenumber` int(11) NOT NULL DEFAULT '0',
+ `l_quantity` double DEFAULT NULL,
+ `l_extendedprice` double DEFAULT NULL,
+ `l_discount` double DEFAULT NULL,
+ `l_tax` double DEFAULT NULL,
+ `l_returnflag` char(1) DEFAULT NULL,
+ `l_linestatus` char(1) DEFAULT NULL,
+ `l_shipDATE` date DEFAULT NULL,
+ `l_commitDATE` date DEFAULT NULL,
+ `l_receiptDATE` date DEFAULT NULL,
+ `l_shipinstruct` char(25) DEFAULT NULL,
+ `l_shipmode` char(10) DEFAULT NULL,
+ `l_comment` varchar(44) DEFAULT NULL,
+ KEY `i_l_shipdate` (`l_shipDATE`),
+ KEY `i_l_suppkey_partkey` (`l_partkey`,`l_suppkey`),
+ KEY `i_l_partkey` (`l_partkey`),
+ KEY `i_l_suppkey` (`l_suppkey`),
+ KEY `i_l_receiptdate` (`l_receiptDATE`),
+ KEY `i_l_orderkey` (`l_orderkey`),
+ KEY `i_l_orderkey_quantity` (`l_orderkey`,`l_quantity`),
+ KEY `i_l_commitdate` (`l_commitDATE`)
+) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
+alter table lineitem add primary key (l_orderkey, l_linenumber);
+show create table lineitem;
+Table Create Table
+lineitem CREATE TABLE `lineitem` (
+ `l_orderkey` int(11) NOT NULL DEFAULT '0',
+ `l_partkey` int(11) DEFAULT NULL,
+ `l_suppkey` int(11) DEFAULT NULL,
+ `l_linenumber` int(11) NOT NULL DEFAULT '0',
+ `l_quantity` double DEFAULT NULL,
+ `l_extendedprice` double DEFAULT NULL,
+ `l_discount` double DEFAULT NULL,
+ `l_tax` double DEFAULT NULL,
+ `l_returnflag` char(1) DEFAULT NULL,
+ `l_linestatus` char(1) DEFAULT NULL,
+ `l_shipDATE` date DEFAULT NULL,
+ `l_commitDATE` date DEFAULT NULL,
+ `l_receiptDATE` date DEFAULT NULL,
+ `l_shipinstruct` char(25) DEFAULT NULL,
+ `l_shipmode` char(10) DEFAULT NULL,
+ `l_comment` varchar(44) DEFAULT NULL,
+ PRIMARY KEY (`l_orderkey`,`l_linenumber`),
+ KEY `i_l_shipdate` (`l_shipDATE`),
+ KEY `i_l_suppkey_partkey` (`l_partkey`,`l_suppkey`),
+ KEY `i_l_partkey` (`l_partkey`),
+ KEY `i_l_suppkey` (`l_suppkey`),
+ KEY `i_l_receiptdate` (`l_receiptDATE`),
+ KEY `i_l_orderkey` (`l_orderkey`),
+ KEY `i_l_orderkey_quantity` (`l_orderkey`,`l_quantity`),
+ KEY `i_l_commitdate` (`l_commitDATE`)
+) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
+drop index `primary` on lineitem;
+select * from lineitem where l_orderkey=1 and l_linenumber=2;
+l_orderkey l_partkey l_suppkey l_linenumber l_quantity l_extendedprice l_discount l_tax l_returnflag l_linestatus l_shipDATE l_commitDATE l_receiptDATE l_shipinstruct l_shipmode l_comment
+1 68 9 2 36 34850.16 0.09 0.06 N O 1996-04-12 1996-02-28 1996-04-20 TAKE BACK RETURN MAIL slyly bold pinto beans detect s
+insert into lineitem values
+(1,68,9,2,36,34850.16,0.07,0.06,'N','O','1996-04-12','1996-02-28','1996-04-20','TAKE BACK RETURN','MAIL','slyly bold pinto beans detect s');
+select * from lineitem where l_orderkey=1 and l_linenumber=2;
+l_orderkey l_partkey l_suppkey l_linenumber l_quantity l_extendedprice l_discount l_tax l_returnflag l_linestatus l_shipDATE l_commitDATE l_receiptDATE l_shipinstruct l_shipmode l_comment
+1 68 9 2 36 34850.16 0.09 0.06 N O 1996-04-12 1996-02-28 1996-04-20 TAKE BACK RETURN MAIL slyly bold pinto beans detect s
+1 68 9 2 36 34850.16 0.07 0.06 N O 1996-04-12 1996-02-28 1996-04-20 TAKE BACK RETURN MAIL slyly bold pinto beans detect s
+alter table lineitem add primary key (l_orderkey, l_linenumber);
+ERROR 23000: Duplicate entry '1-2' for key 'PRIMARY'
+show create table lineitem;
+Table Create Table
+lineitem CREATE TABLE `lineitem` (
+ `l_orderkey` int(11) NOT NULL DEFAULT '0',
+ `l_partkey` int(11) DEFAULT NULL,
+ `l_suppkey` int(11) DEFAULT NULL,
+ `l_linenumber` int(11) NOT NULL DEFAULT '0',
+ `l_quantity` double DEFAULT NULL,
+ `l_extendedprice` double DEFAULT NULL,
+ `l_discount` double DEFAULT NULL,
+ `l_tax` double DEFAULT NULL,
+ `l_returnflag` char(1) DEFAULT NULL,
+ `l_linestatus` char(1) DEFAULT NULL,
+ `l_shipDATE` date DEFAULT NULL,
+ `l_commitDATE` date DEFAULT NULL,
+ `l_receiptDATE` date DEFAULT NULL,
+ `l_shipinstruct` char(25) DEFAULT NULL,
+ `l_shipmode` char(10) DEFAULT NULL,
+ `l_comment` varchar(44) DEFAULT NULL,
+ KEY `i_l_shipdate` (`l_shipDATE`),
+ KEY `i_l_suppkey_partkey` (`l_partkey`,`l_suppkey`),
+ KEY `i_l_partkey` (`l_partkey`),
+ KEY `i_l_suppkey` (`l_suppkey`),
+ KEY `i_l_receiptdate` (`l_receiptDATE`),
+ KEY `i_l_orderkey` (`l_orderkey`),
+ KEY `i_l_orderkey_quantity` (`l_orderkey`,`l_quantity`),
+ KEY `i_l_commitdate` (`l_commitDATE`)
+) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
+select * from lineitem where l_orderkey=1 and l_linenumber=2;
+l_orderkey l_partkey l_suppkey l_linenumber l_quantity l_extendedprice l_discount l_tax l_returnflag l_linestatus l_shipDATE l_commitDATE l_receiptDATE l_shipinstruct l_shipmode l_comment
+1 68 9 2 36 34850.16 0.09 0.06 N O 1996-04-12 1996-02-28 1996-04-20 TAKE BACK RETURN MAIL slyly bold pinto beans detect s
+1 68 9 2 36 34850.16 0.07 0.06 N O 1996-04-12 1996-02-28 1996-04-20 TAKE BACK RETURN MAIL slyly bold pinto beans detect s
+delete from lineitem where l_orderkey=1 and l_linenumber=2 and l_discount=0.07;
+alter table lineitem add primary key (l_orderkey, l_linenumber);
+show create table lineitem;
+Table Create Table
+lineitem CREATE TABLE `lineitem` (
+ `l_orderkey` int(11) NOT NULL DEFAULT '0',
+ `l_partkey` int(11) DEFAULT NULL,
+ `l_suppkey` int(11) DEFAULT NULL,
+ `l_linenumber` int(11) NOT NULL DEFAULT '0',
+ `l_quantity` double DEFAULT NULL,
+ `l_extendedprice` double DEFAULT NULL,
+ `l_discount` double DEFAULT NULL,
+ `l_tax` double DEFAULT NULL,
+ `l_returnflag` char(1) DEFAULT NULL,
+ `l_linestatus` char(1) DEFAULT NULL,
+ `l_shipDATE` date DEFAULT NULL,
+ `l_commitDATE` date DEFAULT NULL,
+ `l_receiptDATE` date DEFAULT NULL,
+ `l_shipinstruct` char(25) DEFAULT NULL,
+ `l_shipmode` char(10) DEFAULT NULL,
+ `l_comment` varchar(44) DEFAULT NULL,
+ PRIMARY KEY (`l_orderkey`,`l_linenumber`),
+ KEY `i_l_shipdate` (`l_shipDATE`),
+ KEY `i_l_suppkey_partkey` (`l_partkey`,`l_suppkey`),
+ KEY `i_l_partkey` (`l_partkey`),
+ KEY `i_l_suppkey` (`l_suppkey`),
+ KEY `i_l_receiptdate` (`l_receiptDATE`),
+ KEY `i_l_orderkey` (`l_orderkey`),
+ KEY `i_l_orderkey_quantity` (`l_orderkey`,`l_quantity`),
+ KEY `i_l_commitdate` (`l_commitDATE`)
+) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
+select * from lineitem where l_orderkey=1 and l_linenumber=2;
+l_orderkey l_partkey l_suppkey l_linenumber l_quantity l_extendedprice l_discount l_tax l_returnflag l_linestatus l_shipDATE l_commitDATE l_receiptDATE l_shipinstruct l_shipmode l_comment
+1 68 9 2 36 34850.16 0.09 0.06 N O 1996-04-12 1996-02-28 1996-04-20 TAKE BACK RETURN MAIL slyly bold pinto beans detect s
+create unique index i_c_name on customer(c_name);
+show create table customer;
+Table Create Table
+customer CREATE TABLE `customer` (
+ `c_custkey` int(11) NOT NULL,
+ `c_name` varchar(25) DEFAULT NULL,
+ `c_address` varchar(40) DEFAULT NULL,
+ `c_nationkey` int(11) DEFAULT NULL,
+ `c_phone` char(15) DEFAULT NULL,
+ `c_acctbal` double DEFAULT NULL,
+ `c_mktsegment` char(10) DEFAULT NULL,
+ `c_comment` varchar(117) DEFAULT NULL,
+ PRIMARY KEY (`c_custkey`),
+ UNIQUE KEY `i_c_name` (`c_name`),
+ KEY `i_c_nationkey` (`c_nationkey`)
+) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
+select * from customer where c_name='Customer#000000003';
+c_custkey c_name c_address c_nationkey c_phone c_acctbal c_mktsegment c_comment
+3 Customer#000000003 MG9kdTD2WBHm 1 11-719-748-3364 7498.12 AUTOMOBILE special packages wake. slyly reg
+drop index i_c_name on customer;
+insert into customer values
+(303,'Customer#000000003','MG9kdTD2WBHm',1,'11-719-748-3364',7498.12,'AUTOMOBILE','special packages wake. slyly reg');
+select * from customer where c_name='Customer#000000003';
+c_custkey c_name c_address c_nationkey c_phone c_acctbal c_mktsegment c_comment
+3 Customer#000000003 MG9kdTD2WBHm 1 11-719-748-3364 7498.12 AUTOMOBILE special packages wake. slyly reg
+303 Customer#000000003 MG9kdTD2WBHm 1 11-719-748-3364 7498.12 AUTOMOBILE special packages wake. slyly reg
+alter table customer add unique index i_c_name(c_name);
+ERROR 23000: Duplicate entry 'Customer#000000003' for key 'i_c_name'
+show create table customer;
+Table Create Table
+customer CREATE TABLE `customer` (
+ `c_custkey` int(11) NOT NULL,
+ `c_name` varchar(25) DEFAULT NULL,
+ `c_address` varchar(40) DEFAULT NULL,
+ `c_nationkey` int(11) DEFAULT NULL,
+ `c_phone` char(15) DEFAULT NULL,
+ `c_acctbal` double DEFAULT NULL,
+ `c_mktsegment` char(10) DEFAULT NULL,
+ `c_comment` varchar(117) DEFAULT NULL,
+ PRIMARY KEY (`c_custkey`),
+ KEY `i_c_nationkey` (`c_nationkey`)
+) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
+select * from customer where c_name='Customer#000000003';
+c_custkey c_name c_address c_nationkey c_phone c_acctbal c_mktsegment c_comment
+3 Customer#000000003 MG9kdTD2WBHm 1 11-719-748-3364 7498.12 AUTOMOBILE special packages wake. slyly reg
+303 Customer#000000003 MG9kdTD2WBHm 1 11-719-748-3364 7498.12 AUTOMOBILE special packages wake. slyly reg
+delete from customer where c_custkey=303;
+select * from customer where c_name='Customer#000000003';
+c_custkey c_name c_address c_nationkey c_phone c_acctbal c_mktsegment c_comment
+3 Customer#000000003 MG9kdTD2WBHm 1 11-719-748-3364 7498.12 AUTOMOBILE special packages wake. slyly reg
+alter table customer add unique index i_c_name(c_name);
+show create table customer;
+Table Create Table
+customer CREATE TABLE `customer` (
+ `c_custkey` int(11) NOT NULL,
+ `c_name` varchar(25) DEFAULT NULL,
+ `c_address` varchar(40) DEFAULT NULL,
+ `c_nationkey` int(11) DEFAULT NULL,
+ `c_phone` char(15) DEFAULT NULL,
+ `c_acctbal` double DEFAULT NULL,
+ `c_mktsegment` char(10) DEFAULT NULL,
+ `c_comment` varchar(117) DEFAULT NULL,
+ PRIMARY KEY (`c_custkey`),
+ UNIQUE KEY `i_c_name` (`c_name`),
+ KEY `i_c_nationkey` (`c_nationkey`)
+) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
+select * from customer where c_name='Customer#000000003';
+c_custkey c_name c_address c_nationkey c_phone c_acctbal c_mktsegment c_comment
+3 Customer#000000003 MG9kdTD2WBHm 1 11-719-748-3364 7498.12 AUTOMOBILE special packages wake. slyly reg
+drop index `primary` on customer;
+show create table customer;
+Table Create Table
+customer CREATE TABLE `customer` (
+ `c_custkey` int(11) NOT NULL,
+ `c_name` varchar(25) DEFAULT NULL,
+ `c_address` varchar(40) DEFAULT NULL,
+ `c_nationkey` int(11) DEFAULT NULL,
+ `c_phone` char(15) DEFAULT NULL,
+ `c_acctbal` double DEFAULT NULL,
+ `c_mktsegment` char(10) DEFAULT NULL,
+ `c_comment` varchar(117) DEFAULT NULL,
+ UNIQUE KEY `i_c_name` (`c_name`),
+ KEY `i_c_nationkey` (`c_nationkey`)
+) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
+insert into customer values
+(3,'Customer#000000303','MG9kdTD2WBHm',1,'11-719-748-3364',7498.12,'AUTOMOBILE','special packages wake. slyly reg');
+alter ignore table customer add primary key (c_custkey);
+show create table customer;
+Table Create Table
+customer CREATE TABLE `customer` (
+ `c_custkey` int(11) NOT NULL,
+ `c_name` varchar(25) DEFAULT NULL,
+ `c_address` varchar(40) DEFAULT NULL,
+ `c_nationkey` int(11) DEFAULT NULL,
+ `c_phone` char(15) DEFAULT NULL,
+ `c_acctbal` double DEFAULT NULL,
+ `c_mktsegment` char(10) DEFAULT NULL,
+ `c_comment` varchar(117) DEFAULT NULL,
+ PRIMARY KEY (`c_custkey`),
+ UNIQUE KEY `i_c_name` (`c_name`),
+ KEY `i_c_nationkey` (`c_nationkey`)
+) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
+select * from customer where c_custkey=3;
+c_custkey c_name c_address c_nationkey c_phone c_acctbal c_mktsegment c_comment
+3 Customer#000000003 MG9kdTD2WBHm 1 11-719-748-3364 7498.12 AUTOMOBILE special packages wake. slyly reg
+DROP DATABASE dbt3_s001;
+set @@storage_engine= default;
diff --git a/mysql-test/r/alter_table_mdev539_myisam.result b/mysql-test/r/alter_table_mdev539_myisam.result
new file mode 100644
index 00000000000..7140c544836
--- /dev/null
+++ b/mysql-test/r/alter_table_mdev539_myisam.result
@@ -0,0 +1,252 @@
+#
+set @@storage_engine= MyISAM;
+#
+# mdev-539: fast build of unique/primary indexes for MyISAM/Aria
+#
+DROP DATABASE IF EXISTS dbt3_s001;
+CREATE DATABASE dbt3_s001;
+use dbt3_s001;
+drop index `primary` on lineitem;
+show create table lineitem;
+Table Create Table
+lineitem CREATE TABLE `lineitem` (
+ `l_orderkey` int(11) NOT NULL DEFAULT '0',
+ `l_partkey` int(11) DEFAULT NULL,
+ `l_suppkey` int(11) DEFAULT NULL,
+ `l_linenumber` int(11) NOT NULL DEFAULT '0',
+ `l_quantity` double DEFAULT NULL,
+ `l_extendedprice` double DEFAULT NULL,
+ `l_discount` double DEFAULT NULL,
+ `l_tax` double DEFAULT NULL,
+ `l_returnflag` char(1) DEFAULT NULL,
+ `l_linestatus` char(1) DEFAULT NULL,
+ `l_shipDATE` date DEFAULT NULL,
+ `l_commitDATE` date DEFAULT NULL,
+ `l_receiptDATE` date DEFAULT NULL,
+ `l_shipinstruct` char(25) DEFAULT NULL,
+ `l_shipmode` char(10) DEFAULT NULL,
+ `l_comment` varchar(44) DEFAULT NULL,
+ KEY `i_l_shipdate` (`l_shipDATE`),
+ KEY `i_l_suppkey_partkey` (`l_partkey`,`l_suppkey`),
+ KEY `i_l_partkey` (`l_partkey`),
+ KEY `i_l_suppkey` (`l_suppkey`),
+ KEY `i_l_receiptdate` (`l_receiptDATE`),
+ KEY `i_l_orderkey` (`l_orderkey`),
+ KEY `i_l_orderkey_quantity` (`l_orderkey`,`l_quantity`),
+ KEY `i_l_commitdate` (`l_commitDATE`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+alter table lineitem add primary key (l_orderkey, l_linenumber);
+show create table lineitem;
+Table Create Table
+lineitem CREATE TABLE `lineitem` (
+ `l_orderkey` int(11) NOT NULL DEFAULT '0',
+ `l_partkey` int(11) DEFAULT NULL,
+ `l_suppkey` int(11) DEFAULT NULL,
+ `l_linenumber` int(11) NOT NULL DEFAULT '0',
+ `l_quantity` double DEFAULT NULL,
+ `l_extendedprice` double DEFAULT NULL,
+ `l_discount` double DEFAULT NULL,
+ `l_tax` double DEFAULT NULL,
+ `l_returnflag` char(1) DEFAULT NULL,
+ `l_linestatus` char(1) DEFAULT NULL,
+ `l_shipDATE` date DEFAULT NULL,
+ `l_commitDATE` date DEFAULT NULL,
+ `l_receiptDATE` date DEFAULT NULL,
+ `l_shipinstruct` char(25) DEFAULT NULL,
+ `l_shipmode` char(10) DEFAULT NULL,
+ `l_comment` varchar(44) DEFAULT NULL,
+ PRIMARY KEY (`l_orderkey`,`l_linenumber`),
+ KEY `i_l_shipdate` (`l_shipDATE`),
+ KEY `i_l_suppkey_partkey` (`l_partkey`,`l_suppkey`),
+ KEY `i_l_partkey` (`l_partkey`),
+ KEY `i_l_suppkey` (`l_suppkey`),
+ KEY `i_l_receiptdate` (`l_receiptDATE`),
+ KEY `i_l_orderkey` (`l_orderkey`),
+ KEY `i_l_orderkey_quantity` (`l_orderkey`,`l_quantity`),
+ KEY `i_l_commitdate` (`l_commitDATE`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop index `primary` on lineitem;
+select * from lineitem where l_orderkey=1 and l_linenumber=2;
+l_orderkey l_partkey l_suppkey l_linenumber l_quantity l_extendedprice l_discount l_tax l_returnflag l_linestatus l_shipDATE l_commitDATE l_receiptDATE l_shipinstruct l_shipmode l_comment
+1 68 9 2 36 34850.16 0.09 0.06 N O 1996-04-12 1996-02-28 1996-04-20 TAKE BACK RETURN MAIL slyly bold pinto beans detect s
+insert into lineitem values
+(1,68,9,2,36,34850.16,0.07,0.06,'N','O','1996-04-12','1996-02-28','1996-04-20','TAKE BACK RETURN','MAIL','slyly bold pinto beans detect s');
+select * from lineitem where l_orderkey=1 and l_linenumber=2;
+l_orderkey l_partkey l_suppkey l_linenumber l_quantity l_extendedprice l_discount l_tax l_returnflag l_linestatus l_shipDATE l_commitDATE l_receiptDATE l_shipinstruct l_shipmode l_comment
+1 68 9 2 36 34850.16 0.09 0.06 N O 1996-04-12 1996-02-28 1996-04-20 TAKE BACK RETURN MAIL slyly bold pinto beans detect s
+1 68 9 2 36 34850.16 0.07 0.06 N O 1996-04-12 1996-02-28 1996-04-20 TAKE BACK RETURN MAIL slyly bold pinto beans detect s
+alter table lineitem add primary key (l_orderkey, l_linenumber);
+ERROR 23000: Duplicate entry '1-2' for key 'PRIMARY'
+show create table lineitem;
+Table Create Table
+lineitem CREATE TABLE `lineitem` (
+ `l_orderkey` int(11) NOT NULL DEFAULT '0',
+ `l_partkey` int(11) DEFAULT NULL,
+ `l_suppkey` int(11) DEFAULT NULL,
+ `l_linenumber` int(11) NOT NULL DEFAULT '0',
+ `l_quantity` double DEFAULT NULL,
+ `l_extendedprice` double DEFAULT NULL,
+ `l_discount` double DEFAULT NULL,
+ `l_tax` double DEFAULT NULL,
+ `l_returnflag` char(1) DEFAULT NULL,
+ `l_linestatus` char(1) DEFAULT NULL,
+ `l_shipDATE` date DEFAULT NULL,
+ `l_commitDATE` date DEFAULT NULL,
+ `l_receiptDATE` date DEFAULT NULL,
+ `l_shipinstruct` char(25) DEFAULT NULL,
+ `l_shipmode` char(10) DEFAULT NULL,
+ `l_comment` varchar(44) DEFAULT NULL,
+ KEY `i_l_shipdate` (`l_shipDATE`),
+ KEY `i_l_suppkey_partkey` (`l_partkey`,`l_suppkey`),
+ KEY `i_l_partkey` (`l_partkey`),
+ KEY `i_l_suppkey` (`l_suppkey`),
+ KEY `i_l_receiptdate` (`l_receiptDATE`),
+ KEY `i_l_orderkey` (`l_orderkey`),
+ KEY `i_l_orderkey_quantity` (`l_orderkey`,`l_quantity`),
+ KEY `i_l_commitdate` (`l_commitDATE`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+select * from lineitem where l_orderkey=1 and l_linenumber=2;
+l_orderkey l_partkey l_suppkey l_linenumber l_quantity l_extendedprice l_discount l_tax l_returnflag l_linestatus l_shipDATE l_commitDATE l_receiptDATE l_shipinstruct l_shipmode l_comment
+1 68 9 2 36 34850.16 0.09 0.06 N O 1996-04-12 1996-02-28 1996-04-20 TAKE BACK RETURN MAIL slyly bold pinto beans detect s
+1 68 9 2 36 34850.16 0.07 0.06 N O 1996-04-12 1996-02-28 1996-04-20 TAKE BACK RETURN MAIL slyly bold pinto beans detect s
+delete from lineitem where l_orderkey=1 and l_linenumber=2 and l_discount=0.07;
+alter table lineitem add primary key (l_orderkey, l_linenumber);
+show create table lineitem;
+Table Create Table
+lineitem CREATE TABLE `lineitem` (
+ `l_orderkey` int(11) NOT NULL DEFAULT '0',
+ `l_partkey` int(11) DEFAULT NULL,
+ `l_suppkey` int(11) DEFAULT NULL,
+ `l_linenumber` int(11) NOT NULL DEFAULT '0',
+ `l_quantity` double DEFAULT NULL,
+ `l_extendedprice` double DEFAULT NULL,
+ `l_discount` double DEFAULT NULL,
+ `l_tax` double DEFAULT NULL,
+ `l_returnflag` char(1) DEFAULT NULL,
+ `l_linestatus` char(1) DEFAULT NULL,
+ `l_shipDATE` date DEFAULT NULL,
+ `l_commitDATE` date DEFAULT NULL,
+ `l_receiptDATE` date DEFAULT NULL,
+ `l_shipinstruct` char(25) DEFAULT NULL,
+ `l_shipmode` char(10) DEFAULT NULL,
+ `l_comment` varchar(44) DEFAULT NULL,
+ PRIMARY KEY (`l_orderkey`,`l_linenumber`),
+ KEY `i_l_shipdate` (`l_shipDATE`),
+ KEY `i_l_suppkey_partkey` (`l_partkey`,`l_suppkey`),
+ KEY `i_l_partkey` (`l_partkey`),
+ KEY `i_l_suppkey` (`l_suppkey`),
+ KEY `i_l_receiptdate` (`l_receiptDATE`),
+ KEY `i_l_orderkey` (`l_orderkey`),
+ KEY `i_l_orderkey_quantity` (`l_orderkey`,`l_quantity`),
+ KEY `i_l_commitdate` (`l_commitDATE`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+select * from lineitem where l_orderkey=1 and l_linenumber=2;
+l_orderkey l_partkey l_suppkey l_linenumber l_quantity l_extendedprice l_discount l_tax l_returnflag l_linestatus l_shipDATE l_commitDATE l_receiptDATE l_shipinstruct l_shipmode l_comment
+1 68 9 2 36 34850.16 0.09 0.06 N O 1996-04-12 1996-02-28 1996-04-20 TAKE BACK RETURN MAIL slyly bold pinto beans detect s
+create unique index i_c_name on customer(c_name);
+show create table customer;
+Table Create Table
+customer CREATE TABLE `customer` (
+ `c_custkey` int(11) NOT NULL,
+ `c_name` varchar(25) DEFAULT NULL,
+ `c_address` varchar(40) DEFAULT NULL,
+ `c_nationkey` int(11) DEFAULT NULL,
+ `c_phone` char(15) DEFAULT NULL,
+ `c_acctbal` double DEFAULT NULL,
+ `c_mktsegment` char(10) DEFAULT NULL,
+ `c_comment` varchar(117) DEFAULT NULL,
+ PRIMARY KEY (`c_custkey`),
+ UNIQUE KEY `i_c_name` (`c_name`),
+ KEY `i_c_nationkey` (`c_nationkey`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+select * from customer where c_name='Customer#000000003';
+c_custkey c_name c_address c_nationkey c_phone c_acctbal c_mktsegment c_comment
+3 Customer#000000003 MG9kdTD2WBHm 1 11-719-748-3364 7498.12 AUTOMOBILE special packages wake. slyly reg
+drop index i_c_name on customer;
+insert into customer values
+(303,'Customer#000000003','MG9kdTD2WBHm',1,'11-719-748-3364',7498.12,'AUTOMOBILE','special packages wake. slyly reg');
+select * from customer where c_name='Customer#000000003';
+c_custkey c_name c_address c_nationkey c_phone c_acctbal c_mktsegment c_comment
+3 Customer#000000003 MG9kdTD2WBHm 1 11-719-748-3364 7498.12 AUTOMOBILE special packages wake. slyly reg
+303 Customer#000000003 MG9kdTD2WBHm 1 11-719-748-3364 7498.12 AUTOMOBILE special packages wake. slyly reg
+alter table customer add unique index i_c_name(c_name);
+ERROR 23000: Duplicate entry 'Customer#000000003' for key 'i_c_name'
+show create table customer;
+Table Create Table
+customer CREATE TABLE `customer` (
+ `c_custkey` int(11) NOT NULL,
+ `c_name` varchar(25) DEFAULT NULL,
+ `c_address` varchar(40) DEFAULT NULL,
+ `c_nationkey` int(11) DEFAULT NULL,
+ `c_phone` char(15) DEFAULT NULL,
+ `c_acctbal` double DEFAULT NULL,
+ `c_mktsegment` char(10) DEFAULT NULL,
+ `c_comment` varchar(117) DEFAULT NULL,
+ PRIMARY KEY (`c_custkey`),
+ KEY `i_c_nationkey` (`c_nationkey`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+select * from customer where c_name='Customer#000000003';
+c_custkey c_name c_address c_nationkey c_phone c_acctbal c_mktsegment c_comment
+3 Customer#000000003 MG9kdTD2WBHm 1 11-719-748-3364 7498.12 AUTOMOBILE special packages wake. slyly reg
+303 Customer#000000003 MG9kdTD2WBHm 1 11-719-748-3364 7498.12 AUTOMOBILE special packages wake. slyly reg
+delete from customer where c_custkey=303;
+select * from customer where c_name='Customer#000000003';
+c_custkey c_name c_address c_nationkey c_phone c_acctbal c_mktsegment c_comment
+3 Customer#000000003 MG9kdTD2WBHm 1 11-719-748-3364 7498.12 AUTOMOBILE special packages wake. slyly reg
+alter table customer add unique index i_c_name(c_name);
+show create table customer;
+Table Create Table
+customer CREATE TABLE `customer` (
+ `c_custkey` int(11) NOT NULL,
+ `c_name` varchar(25) DEFAULT NULL,
+ `c_address` varchar(40) DEFAULT NULL,
+ `c_nationkey` int(11) DEFAULT NULL,
+ `c_phone` char(15) DEFAULT NULL,
+ `c_acctbal` double DEFAULT NULL,
+ `c_mktsegment` char(10) DEFAULT NULL,
+ `c_comment` varchar(117) DEFAULT NULL,
+ PRIMARY KEY (`c_custkey`),
+ UNIQUE KEY `i_c_name` (`c_name`),
+ KEY `i_c_nationkey` (`c_nationkey`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+select * from customer where c_name='Customer#000000003';
+c_custkey c_name c_address c_nationkey c_phone c_acctbal c_mktsegment c_comment
+3 Customer#000000003 MG9kdTD2WBHm 1 11-719-748-3364 7498.12 AUTOMOBILE special packages wake. slyly reg
+drop index `primary` on customer;
+show create table customer;
+Table Create Table
+customer CREATE TABLE `customer` (
+ `c_custkey` int(11) NOT NULL,
+ `c_name` varchar(25) DEFAULT NULL,
+ `c_address` varchar(40) DEFAULT NULL,
+ `c_nationkey` int(11) DEFAULT NULL,
+ `c_phone` char(15) DEFAULT NULL,
+ `c_acctbal` double DEFAULT NULL,
+ `c_mktsegment` char(10) DEFAULT NULL,
+ `c_comment` varchar(117) DEFAULT NULL,
+ UNIQUE KEY `i_c_name` (`c_name`),
+ KEY `i_c_nationkey` (`c_nationkey`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into customer values
+(3,'Customer#000000303','MG9kdTD2WBHm',1,'11-719-748-3364',7498.12,'AUTOMOBILE','special packages wake. slyly reg');
+alter ignore table customer add primary key (c_custkey);
+show create table customer;
+Table Create Table
+customer CREATE TABLE `customer` (
+ `c_custkey` int(11) NOT NULL,
+ `c_name` varchar(25) DEFAULT NULL,
+ `c_address` varchar(40) DEFAULT NULL,
+ `c_nationkey` int(11) DEFAULT NULL,
+ `c_phone` char(15) DEFAULT NULL,
+ `c_acctbal` double DEFAULT NULL,
+ `c_mktsegment` char(10) DEFAULT NULL,
+ `c_comment` varchar(117) DEFAULT NULL,
+ PRIMARY KEY (`c_custkey`),
+ UNIQUE KEY `i_c_name` (`c_name`),
+ KEY `i_c_nationkey` (`c_nationkey`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+select * from customer where c_custkey=3;
+c_custkey c_name c_address c_nationkey c_phone c_acctbal c_mktsegment c_comment
+3 Customer#000000003 MG9kdTD2WBHm 1 11-719-748-3364 7498.12 AUTOMOBILE special packages wake. slyly reg
+DROP DATABASE dbt3_s001;
+set @@storage_engine= default;
diff --git a/mysql-test/r/cassandra.result b/mysql-test/r/cassandra.result
new file mode 100644
index 00000000000..e26df069f93
--- /dev/null
+++ b/mysql-test/r/cassandra.result
@@ -0,0 +1,593 @@
+drop table if exists t0, t1;
+create table t1 (a int) engine=cassandra
+thrift_host='localhost' keyspace='foo' column_family='colfam';
+ERROR 42000: This table type requires a primary key
+create table t1 (a int primary key, b int) engine=cassandra
+thrift_host='localhost' keyspace='foo' column_family='colfam';
+ERROR HY000: Unable to connect to foreign data source: Default TException. [Keyspace foo does not exist]
+create table t1 (rowkey char(10) primary key, column1 char(10)) engine=cassandra
+thrift_host='127.0.0.2' keyspace='foo' column_family='colfam';
+ERROR HY000: Unable to connect to foreign data source: connect() failed: Connection refused [1]
+create table t1 (rowkey char(10) primary key, column1 char(10)) engine=cassandra
+thrift_host='localhost' keyspace='no_such_keyspace' column_family='colfam';
+ERROR HY000: Unable to connect to foreign data source: Default TException. [Keyspace no_such_keyspace does not exist]
+create table t1 (rowkey char(10) primary key, column1 char(10)) engine=cassandra
+thrift_host='localhost' keyspace='no_such_keyspace';
+ERROR HY000: Unable to connect to foreign data source: keyspace and column_family table options must be specified
+# Now, create a table for real and insert data
+create table t1 (pk varchar(36) primary key, data1 varchar(60), data2 bigint) engine=cassandra
+thrift_host='localhost' keyspace='mariadbtest2' column_family='cf1';
+# Just in case there were left-overs from previous runs:
+delete from t1;
+select * from t1;
+pk data1 data2
+insert into t1 values ('rowkey10', 'data1-value', 123456);
+insert into t1 values ('rowkey11', 'data1-value2', 34543);
+insert into t1 values ('rowkey12', 'data1-value3', 454);
+select * from t1;
+pk data1 data2
+rowkey12 data1-value3 454
+rowkey10 data1-value 123456
+rowkey11 data1-value2 34543
+explain
+select * from t1 where pk='rowkey11';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 const PRIMARY PRIMARY 38 const 1
+select * from t1 where pk='rowkey11';
+pk data1 data2
+rowkey11 data1-value2 34543
+delete from t1 where pk='rowkey11';
+select * from t1;
+pk data1 data2
+rowkey12 data1-value3 454
+rowkey10 data1-value 123456
+delete from t1;
+select * from t1;
+pk data1 data2
+#
+# A query with filesort (check that table_flags() includes HA_REC_NOT_IN_SEQ;
+# also check ::rnd_pos())
+#
+insert into t1 values ('rowkey10', 'data1-value', 123456);
+insert into t1 values ('rowkey11', 'data1-value2', 34543);
+insert into t1 values ('rowkey12', 'data1-value3', 454);
+select * from t1 order by data2;
+pk data1 data2
+rowkey12 data1-value3 454
+rowkey11 data1-value2 34543
+rowkey10 data1-value 123456
+delete from t1;
+drop table t1;
+#
+# MDEV-476: Cassandra: Server crashes in calculate_key_len on DELETE with ORDER BY
+#
+CREATE TABLE t1 (rowkey BIGINT PRIMARY KEY, a BIGINT) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf2';
+INSERT INTO t1 VALUES (1,1),(2,2);
+DELETE FROM t1 ORDER BY a LIMIT 1;
+DROP TABLE t1;
+#
+# Batched INSERT
+#
+show variables like 'cassandra_insert_batch_size';
+Variable_name Value
+cassandra_insert_batch_size 100
+show status like 'cassandra_row_insert%';
+Variable_name Value
+Cassandra_row_insert_batches 7
+Cassandra_row_inserts 8
+CREATE TABLE t1 (rowkey BIGINT PRIMARY KEY, a BIGINT) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf2';
+delete from t1;
+INSERT INTO t1 VALUES (1,1),(2,2);
+DELETE FROM t1 ORDER BY a LIMIT 1;
+DROP TABLE t1;
+show status like 'cassandra_row_insert%';
+Variable_name Value
+Cassandra_row_insert_batches 8
+Cassandra_row_inserts 10
+# FLUSH STATUS doesn't work for our variables, just like with InnoDB.
+flush status;
+show status like 'cassandra_row_insert%';
+Variable_name Value
+Cassandra_row_insert_batches 0
+Cassandra_row_inserts 0
+#
+# Batched Key Access
+#
+# Control variable (we are not yet able to make use of MRR's buffer)
+show variables like 'cassandra_multi%';
+Variable_name Value
+cassandra_multiget_batch_size 100
+# MRR-related status variables:
+show status like 'cassandra_multi%';
+Variable_name Value
+Cassandra_multiget_keys_scanned 0
+Cassandra_multiget_reads 0
+Cassandra_multiget_rows_read 0
+CREATE TABLE t1 (rowkey BIGINT PRIMARY KEY, a BIGINT) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf2';
+delete from t1;
+INSERT INTO t1 VALUES (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
+set @tmp_jcl=@@join_cache_level;
+set join_cache_level=8;
+explain select * from t1 A, t1 B where B.rowkey=A.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE A ALL NULL NULL NULL NULL 1000 Using where
+1 SIMPLE B eq_ref PRIMARY PRIMARY 8 test.A.a 1 Using join buffer (flat, BKAH join); multiget_slice
+select * from t1 A, t1 B where B.rowkey=A.a;
+rowkey a rowkey a
+0 0 0 0
+1 1 1 1
+2 2 2 2
+3 3 3 3
+4 4 4 4
+5 5 5 5
+6 6 6 6
+7 7 7 7
+8 8 8 8
+9 9 9 9
+show status like 'cassandra_multi%';
+Variable_name Value
+Cassandra_multiget_keys_scanned 10
+Cassandra_multiget_reads 1
+Cassandra_multiget_rows_read 10
+insert into t1 values(1, 8);
+insert into t1 values(3, 8);
+insert into t1 values(5, 8);
+insert into t1 values(7, 8);
+select * from t1 A, t1 B where B.rowkey=A.a;
+rowkey a rowkey a
+0 0 0 0
+2 2 2 2
+4 4 4 4
+6 6 6 6
+1 8 8 8
+7 8 8 8
+8 8 8 8
+5 8 8 8
+3 8 8 8
+9 9 9 9
+show status like 'cassandra_multi%';
+Variable_name Value
+Cassandra_multiget_keys_scanned 16
+Cassandra_multiget_reads 2
+Cassandra_multiget_rows_read 16
+delete from t1;
+drop table t1;
+#
+# MDEV-480: TRUNCATE TABLE on a Cassandra table does not remove rows
+#
+CREATE TABLE t1 (rowkey BIGINT PRIMARY KEY, a BIGINT) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf2';
+INSERT INTO t1 VALUES (0,0),(1,1),(2,2);
+truncate table t1;
+select * from t1;
+rowkey a
+drop table t1;
+#
+# MDEV-494, part #1: phantom row for big full-scan selects
+#
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+CREATE TABLE t1 (rowkey BIGINT PRIMARY KEY, a BIGINT) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf2';
+insert into t1 select A.a + 10 * B.a + 100*C.a, 12345 from t0 A, t0 B, t0 C;
+select count(*) from t1;
+count(*)
+1000
+select count(*) from t1 where a=12345;
+count(*)
+1000
+delete from t1;
+drop table t1;
+drop table t0;
+# 32-bit INT type support
+CREATE TABLE t1 (rowkey BIGINT PRIMARY KEY, intcol INT) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf3';
+insert into t1 values (10,10);
+insert into t1 values (12,12);
+delete from t1;
+drop table t1;
+#
+# Try accessing column family w/o explicitly defined columns
+#
+CREATE TABLE t1 (my_primary_key varchar(10) PRIMARY KEY) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf10';
+ERROR HY000: Internal error: 'target column family has no key_alias defined, PRIMARY KEY column must be named 'rowkey''
+CREATE TABLE t1 (rowkey varchar(10) PRIMARY KEY) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf10';
+DROP TABLE t1;
+#
+# Timestamp datatype support
+#
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, datecol timestamp) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf4';
+delete from t2;
+insert into t2 values (1, '2012-08-29 01:23:45');
+select * from t2;
+rowkey datecol
+1 2012-08-29 01:23:45
+delete from t2;
+# MDEV-498: Cassandra: Inserting a timestamp does not work on a 32-bit system
+INSERT INTO t2 VALUES (10,'2012-12-12 12:12:12');
+SELECT * FROM t2;
+rowkey datecol
+10 2012-12-12 12:12:12
+delete from t2;
+#
+# (no MDEV#) Check that insert counters work correctly
+#
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+set cassandra_insert_batch_size=10;
+insert into t2 select A.a+10*B.a, now() from t0 A, t0 B;
+inserts insert_batches
+100 10
+set cassandra_insert_batch_size=1;
+insert into t2 select A.a+10*B.a+100, now() from t0 A, t0 B;
+inserts insert_batches
+100 100
+delete from t2;
+drop table t2;
+drop table t0;
+#
+# UUID datatype support
+#
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, uuidcol char(36)) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf5';
+delete from t2;
+insert into t2 values(1,'9b5658dc-f32f-11e1-94cd-f46d046e9f09');
+insert into t2 values(2,'not-an-uuid');
+ERROR 22003: Out of range value for column 'uuidcol' at row 1
+insert into t2 values(3,'9b5658dc-f32f-11e1=94cd-f46d046e9f09');
+ERROR 22003: Out of range value for column 'uuidcol' at row 1
+insert into t2 values(4,'9b5658dc-fzzf-11e1-94cd-f46d046e9f09');
+ERROR 22003: Out of range value for column 'uuidcol' at row 1
+insert into t2 values
+(5,'9b5658dc-f11f-11e1-94cd-f46d046e9f09'),
+(6,'9b5658dc-f11f011e1-94cd-f46d046e9f09');
+ERROR 22003: Out of range value for column 'uuidcol' at row 2
+select * from t2;
+rowkey uuidcol
+1 9b5658dc-f32f-11e1-94cd-f46d046e9f09
+5 9b5658dc-f11f-11e1-94cd-f46d046e9f09
+delete from t2;
+drop table t2;
+CREATE TABLE t2 (rowkey char(36) PRIMARY KEY, col1 int) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf6';
+delete from t2;
+insert into t2 values('9b5658dc-f32f-11e1-94cd-f46d046e9f09', 1234);
+insert into t2 values('not-an-uuid', 563);
+ERROR 22003: Out of range value for column 'rowkey' at row 1
+select * from t2;
+rowkey col1
+9b5658dc-f32f-11e1-94cd-f46d046e9f09 1234
+delete from t2;
+drop table t2;
+#
+# boolean datatype support
+#
+CREATE TABLE t2 (rowkey int PRIMARY KEY, boolcol bool) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf7';
+insert into t2 values (0, 0);
+insert into t2 values (1, 1);
+select * from t2;
+rowkey boolcol
+0 0
+1 1
+delete from t2;
+drop table t2;
+#
+# Counter datatype support (read-only)
+#
+CREATE TABLE t2 (rowkey varchar(32) PRIMARY KEY, countercol bigint) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf8';
+select * from t2;
+rowkey countercol
+cnt1 1
+cnt2 100
+drop table t2;
+#
+# Check that @@cassandra_default_thrift_host works
+#
+show variables like 'cassandra_default_thrift_host';
+Variable_name Value
+cassandra_default_thrift_host
+set @tmp=@@cassandra_default_thrift_host;
+set cassandra_default_thrift_host='localhost';
+ERROR HY000: Variable 'cassandra_default_thrift_host' is a GLOBAL variable and should be set with SET GLOBAL
+set global cassandra_default_thrift_host='localhost';
+# Try creating a table without specifying thrift_host:
+CREATE TABLE t2 (rowkey varchar(32) PRIMARY KEY, countercol bigint) ENGINE=CASSANDRA
+keyspace='mariadbtest2' column_family = 'cf8';
+select * from t2;
+rowkey countercol
+cnt1 1
+cnt2 100
+drop table t2;
+set global cassandra_default_thrift_host=@tmp;
+#
+# Consistency settings
+#
+show variables like 'cassandra_%consistency';
+Variable_name Value
+cassandra_read_consistency ONE
+cassandra_write_consistency ONE
+set @tmp=@@cassandra_write_consistency;
+# Unfortunately, there is no easy way to check whether the setting has any effect.
+set cassandra_write_consistency='ONE';
+set cassandra_write_consistency='QUORUM';
+set cassandra_write_consistency='LOCAL_QUORUM';
+set cassandra_write_consistency='EACH_QUORUM';
+set cassandra_write_consistency='ALL';
+set cassandra_write_consistency='ANY';
+set cassandra_write_consistency='TWO';
+set cassandra_write_consistency='THREE';
+set cassandra_write_consistency=@tmp;
+#
+# varint datatype support
+#
+CREATE TABLE t2 (rowkey varchar(32) PRIMARY KEY, varint_col varbinary(32)) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf9';
+select rowkey, hex(varint_col) from t2;
+rowkey hex(varint_col)
+val-01 01
+val-0x123456 123456
+val-0x12345678 12345678
+drop table t2;
+# Now, let's check what happens when MariaDB's column is not wide enough:
+CREATE TABLE t2 (rowkey varchar(32) PRIMARY KEY, varint_col varbinary(2)) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf9';
+select rowkey, hex(varint_col) from t2;
+ERROR HY000: Internal error: 'Unable to convert value for field `varint_col` from Cassandra's data format. Source data is 4 bytes, 0x12345678'
+drop table t2;
+#
+# Decimal datatype support
+#
+CREATE TABLE t2 (rowkey varchar(32) PRIMARY KEY, decimal_col varbinary(32)) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf11';
+select rowkey, hex(decimal_col) from t2;
+rowkey hex(decimal_col)
+val_1.5 000000010F
+val_0.5 0000000105
+val_1234 0000000004D2
+drop table t2;
+#
+# Mapping TIMESTAMP -> int64
+#
+set @save_tz= @@time_zone;
+set time_zone='UTC';
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, datecol timestamp) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf4';
+insert into t2 values (1, '2012-08-29 01:23:45');
+INSERT INTO t2 VALUES (10,'2012-08-29 01:23:46');
+drop table t2;
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, datecol bigint) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf4';
+select * from t2;
+rowkey datecol
+1 1346203425000
+10 1346203426000
+delete from t2;
+drop table t2;
+set time_zone=@save_tz;
+#
+# Check whether changing parameters with ALTER TABLE works.
+#
+CREATE TABLE t2 (rowkey varchar(32) PRIMARY KEY, decimal_col varbinary(32)) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf11';
+drop table t2;
+CREATE TABLE t2 (rowkey varchar(32) PRIMARY KEY, decimal_col varbinary(32)) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf11';
+alter table t2 column_family='cf12';
+Writes made during ALTER TABLE
+0
+drop table t2;
+#
+# UPDATE command support
+#
+create table t1 (pk varchar(36) primary key, data1 varchar(60), data2 bigint) engine=cassandra
+thrift_host='localhost' keyspace='mariadbtest2' column_family='cf1';
+insert into t1 values ('rowkey10', 'data1-value', 123456);
+insert into t1 values ('rowkey11', 'data1-value2', 34543);
+insert into t1 values ('rowkey12', 'data1-value3', 454);
+select * from t1;
+pk data1 data2
+rowkey12 data1-value3 454
+rowkey10 data1-value 123456
+rowkey11 data1-value2 34543
+update t1 set data1='updated-1' where pk='rowkey11';
+select * from t1;
+pk data1 data2
+rowkey12 data1-value3 454
+rowkey10 data1-value 123456
+rowkey11 updated-1 34543
+update t1 set pk='new-rowkey12' where pk='rowkey12';
+select * from t1;
+pk data1 data2
+rowkey10 data1-value 123456
+new-rowkey12 data1-value3 454
+rowkey11 updated-1 34543
+delete from t1;
+drop table t1;
+#
+# Dynamic columns support
+#
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, uuidcol blob DYNAMIC_COLUMN_STORAGE=1) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf5';
+drop table t2;
+# error: dynamic column is not a blob
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, uuidcol char(36) DYNAMIC_COLUMN_STORAGE=1) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf5';
+ERROR 42000: Incorrect column specifier for column 'uuidcol'
+# error: double dynamic column
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, uuidcol blob DYNAMIC_COLUMN_STORAGE=1, textcol blob DYNAMIC_COLUMN_STORAGE=1) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf5';
+ERROR 42000: Incorrect column specifier for column 'textcol'
+#
+# Dynamic column read
+#
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, uuidcol char(36)) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf5';
+delete from t2;
+insert into t2 values(1,'9b5658dc-f32f-11e1-94cd-f46d046e9f09');
+insert into t2 values(2,'9b5658dc-f32f-11e1-94cd-f46d046e9f0a');
+drop table t2;
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf5';
+select rowkey, column_list(dyn), column_get(dyn, 'uuidcol' as char) from t2;
+rowkey column_list(dyn) column_get(dyn, 'uuidcol' as char)
+1 `uuidcol` 9b5658dc-f32f-11e1-94cd-f46d046e9f09
+2 `uuidcol` 9b5658dc-f32f-11e1-94cd-f46d046e9f0a
+drop table t2;
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, uuidcol char(36)) ENGINE=CASSANDRA
+thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf5';
+delete from t2;
+drop table t2;
+#
+# Dynamic column insert
+#
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf5';
+insert into t2 values (1, column_create("dyn1", 1, "dyn2", "two"));
+select rowkey, column_json(dyn) from t2;
+rowkey column_json(dyn)
+1 {"dyn1":"1","dyn2":"two"}
+delete from t2;
+drop table t2;
+# bigint
+CREATE TABLE t1 (rowkey bigint PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf2';
+insert into t1 values (1, column_create("dyn1", 1, "dyn2", "two", 'a', 254324));
+insert into t1 values (2, column_create("dyn1", 1, "dyn2", "two", 'a', 2543));
+select rowkey, column_json(dyn) from t1;
+rowkey column_json(dyn)
+1 {"a":254324,"dyn1":"1","dyn2":"two"}
+2 {"a":2543,"dyn1":"1","dyn2":"two"}
+delete from t1;
+drop table t1;
+# int
+CREATE TABLE t1 (rowkey bigint PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf3';
+insert into t1 values (1, column_create("dyn1", 1, "dyn2", "two", 'intcol', 254324));
+insert into t1 values (2, column_create("dyn1", 1, "dyn2", "two", 'intcol', 2543));
+select rowkey, column_json(dyn) from t1;
+rowkey column_json(dyn)
+1 {"dyn1":"1","dyn2":"two","intcol":254324}
+2 {"dyn1":"1","dyn2":"two","intcol":2543}
+delete from t1;
+drop table t1;
+# timestamp
+CREATE TABLE t1 (rowkey bigint PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf4';
+insert into t1 values (1, column_create("dyn1", 1, "dyn2", "two", 'datecol', 254324));
+insert into t1 values (2, column_create("dyn1", 1, "dyn2", "two", 'datecol', 2543));
+select rowkey, column_json(dyn) from t1;
+rowkey column_json(dyn)
+1 {"dyn1":"1","dyn2":"two","datecol":254324}
+2 {"dyn1":"1","dyn2":"two","datecol":2543}
+delete from t1;
+drop table t1;
+# boolean
+CREATE TABLE t1 (rowkey int PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf7';
+insert into t1 values (1, column_create("dyn1", 1, "dyn2", "two", 'boolcol', 254324));
+insert into t1 values (2, column_create("dyn1", 1, "dyn2", "two", 'boolcol', 0));
+select rowkey, column_json(dyn) from t1;
+rowkey column_json(dyn)
+1 {"dyn1":"1","dyn2":"two","boolcol":1}
+2 {"dyn1":"1","dyn2":"two","boolcol":0}
+select rowkey, column_json(dyn) from t1;
+rowkey column_json(dyn)
+1 {"dyn1":"1","dyn2":"two","boolcol":1}
+2 {"dyn1":"1","dyn2":"two","boolcol":0}
+update t1 set dyn=column_add(dyn, "dyn2", null, "dyn3", "3");
+select rowkey, column_json(dyn) from t1;
+rowkey column_json(dyn)
+1 {"dyn1":"1","dyn3":"3","boolcol":1}
+2 {"dyn1":"1","dyn3":"3","boolcol":0}
+update t1 set dyn=column_add(dyn, "dyn1", null) where rowkey= 1;
+select rowkey, column_json(dyn) from t1;
+rowkey column_json(dyn)
+1 {"dyn3":"3","boolcol":1}
+2 {"dyn1":"1","dyn3":"3","boolcol":0}
+update t1 set dyn=column_add(dyn, "dyn3", null, "a", "ddd");
+select rowkey, column_json(dyn) from t1;
+rowkey column_json(dyn)
+1 {"a":"ddd","boolcol":1}
+2 {"a":"ddd","dyn1":"1","boolcol":0}
+update t1 set dyn=column_add(dyn, "12345678901234", "ddd");
+select rowkey, column_json(dyn) from t1;
+rowkey column_json(dyn)
+1 {"a":"ddd","boolcol":1,"12345678901234":"ddd"}
+2 {"a":"ddd","dyn1":"1","boolcol":0,"12345678901234":"ddd"}
+update t1 set dyn=column_add(dyn, "12345678901234", null);
+select rowkey, column_json(dyn) from t1;
+rowkey column_json(dyn)
+1 {"a":"ddd","boolcol":1}
+2 {"a":"ddd","dyn1":"1","boolcol":0}
+update t1 set dyn=column_add(dyn, 'boolcol', null) where rowkey= 2;
+select rowkey, column_json(dyn) from t1;
+rowkey column_json(dyn)
+1 {"a":"ddd","boolcol":1}
+2 {"a":"ddd","dyn1":"1"}
+update t1 set rowkey= 3, dyn=column_add(dyn, "dyn1", null, 'boolcol', 0) where rowkey= 2;
+select rowkey, column_json(dyn) from t1;
+rowkey column_json(dyn)
+1 {"a":"ddd","boolcol":1}
+3 {"a":"ddd","boolcol":0}
+delete from t1;
+drop table t1;
+CREATE TABLE t1 (rowkey varchar(10) PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cfd1';
+select * from t1;
+ERROR HY000: Internal error: 'Unable to convert value for field `dyn` from Cassandra's data format. Name length exceed limit of 16383: 'very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_v'
+drop table t1;
+CREATE TABLE t1 (rowkey int PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes)
+ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cfd2';
+DELETE FROM t1;
+insert into t1 values (1, column_create("dyn", 1));
+select rowkey, column_list(dyn) from t1;
+rowkey column_list(dyn)
+1 `dyn`
+delete from t1;
+DROP TABLE t1;
+CREATE TABLE t1 (rowkey int PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes)
+ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cfd2';
+insert into t1 values (1,'9b5658dc-f32f-11e1-94cd-f46d046e9f0a');
+ERROR HY000: Encountered illegal format of dynamic column string
+delete from t1;
+DROP TABLE t1;
+#
+# MDEV-565: Server crashes in ha_cassandra::write_row on
+# inserting NULL into a dynamic column
+#
+CREATE TABLE t1 (rowkey int PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes)
+ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cfd2';
+insert into t1 values (1, NULL);
+delete from t1;
+DROP TABLE t1;
+#
+# Strange side effect of Cassandra: removing all columns of the primary
+# key removes the whole row.
+#
+CREATE TABLE t1 (rowkey int PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes)
+ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cfd2';
+INSERT INTO t1 VALUES(2,column_create("ab","ab"));
+select rowkey, column_json(dyn) from t1;
+rowkey column_json(dyn)
+2 {"ab":"ab"}
+UPDATE t1 set dyn=NULL;
+select rowkey, column_json(dyn) from t1;
+rowkey column_json(dyn)
+INSERT INTO t1 VALUES(2,column_create("ab","ab"));
+select rowkey, column_json(dyn) from t1;
+rowkey column_json(dyn)
+2 {"ab":"ab"}
+UPDATE t1 set dyn="";
+select rowkey, column_json(dyn) from t1;
+rowkey column_json(dyn)
+delete from t1;
+DROP TABLE t1;
+#
+# MDEV-4005: Server crashes on creating a Cassandra table
+# with a mix of static and dynamic columns
+#
+DROP TABLE IF EXISTS t1, t2;
+CREATE TABLE t1 (
+pk int primary key,
+col_int int,
+dyncol blob DYNAMIC_COLUMN_STORAGE=yes
+) ENGINE=cassandra keyspace='bug' thrift_host = '127.0.0.1' column_family='cf1';
+drop table t1;
diff --git a/mysql-test/r/comments.result b/mysql-test/r/comments.result
index 8aebe95c5ac..e65c886014d 100644
--- a/mysql-test/r/comments.result
+++ b/mysql-test/r/comments.result
@@ -10,7 +10,7 @@ ERROR 42000: Query was empty
select 1 /*!32301 +1 */;
1 +1
2
-select 1 /*!52301 +1 */;
+select 1 /*!952301 +1 */;
1
1
select 1--1;
@@ -35,19 +35,19 @@ select 1 /*M!50000 +1 */;
select 1 /*M!50300 +1 */;
1 +1
2
-select 2 /*M!99999 +1 */;
+select 2 /*M!999999 +1 */;
2
2
select 2 /*M!0000 +1 */;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '0000 +1 */' at line 1
select 1/*!2*/;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '2*/' at line 1
-select 1/*!000002*/;
+select 1/*!0000002*/;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '2*/' at line 1
select 1/*!999992*/;
1
1
-select 1 + /*!00000 2 */ + 3 /*!99999 noise*/ + 4;
+select 1 + /*!00000 2 */ + 3 /*!999999 noise*/ + 4;
1 + 2 + 3 + 4
10
drop table if exists table_28779;
@@ -60,8 +60,8 @@ prepare bar from "DELETE FROM table_28779 WHERE a = 7 OR 1=1/*! AND 2=2;";
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
prepare bar from "DELETE FROM table_28779 WHERE a = 7 OR 1=1/*! AND 2=2;*";
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '*' at line 1
-prepare bar from "DELETE FROM table_28779 WHERE a = 7 OR 1=1/*!98765' AND b = 'bar';";
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '/*!98765' AND b = 'bar'' at line 1
-prepare bar from "DELETE FROM table_28779 WHERE a = 7 OR 1=1/*!98765' AND b = 'bar';*";
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '/*!98765' AND b = 'bar';*' at line 1
+prepare bar from "DELETE FROM table_28779 WHERE a = 7 OR 1=1/*!998765' AND b = 'bar';";
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '/*!998765' AND b = 'bar'' at line 1
+prepare bar from "DELETE FROM table_28779 WHERE a = 7 OR 1=1/*!998765' AND b = 'bar';*";
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '/*!998765' AND b = 'bar';*' at line 1
drop table table_28779;
diff --git a/mysql-test/r/connect.result b/mysql-test/r/connect.result
index 09eaf9855b0..a4bd6a262a7 100644
--- a/mysql-test/r/connect.result
+++ b/mysql-test/r/connect.result
@@ -1,6 +1,7 @@
drop table if exists t1,t2;
show tables;
Tables_in_mysql
+column_stats
columns_priv
db
event
@@ -11,6 +12,7 @@ help_keyword
help_relation
help_topic
host
+index_stats
ndb_binlog_index
plugin
proc
@@ -18,6 +20,7 @@ procs_priv
proxies_priv
servers
slow_log
+table_stats
tables_priv
time_zone
time_zone_leap_second
@@ -35,6 +38,7 @@ grant ALL on *.* to test@localhost identified by "gambling";
grant ALL on *.* to test@127.0.0.1 identified by "gambling";
show tables;
Tables_in_mysql
+column_stats
columns_priv
db
event
@@ -45,6 +49,7 @@ help_keyword
help_relation
help_topic
host
+index_stats
ndb_binlog_index
plugin
proc
@@ -52,6 +57,7 @@ procs_priv
proxies_priv
servers
slow_log
+table_stats
tables_priv
time_zone
time_zone_leap_second
@@ -77,6 +83,7 @@ ERROR HY000: Password hash should be a 41-digit hexadecimal number
set password=old_password('gambling3');
show tables;
Tables_in_mysql
+column_stats
columns_priv
db
event
@@ -87,6 +94,7 @@ help_keyword
help_relation
help_topic
host
+index_stats
ndb_binlog_index
plugin
proc
@@ -94,6 +102,7 @@ procs_priv
proxies_priv
servers
slow_log
+table_stats
tables_priv
time_zone
time_zone_leap_second
diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result
index 4cb14aabb2c..7c556354d2e 100644
--- a/mysql-test/r/create.result
+++ b/mysql-test/r/create.result
@@ -55,9 +55,9 @@ ERROR 42000: Incorrect table name 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
create table a (`aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa` int);
ERROR 42000: Identifier name 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' is too long
create table t1 (a datetime default now());
-ERROR 42000: Invalid default value for 'a'
+drop table t1;
create table t1 (a datetime on update now());
-ERROR HY000: Invalid ON UPDATE clause for 'a' column
+drop table t1;
create table t1 (a int default 100 auto_increment);
ERROR 42000: Invalid default value for 'a'
create table t1 (a tinyint default 1000);
@@ -1762,7 +1762,9 @@ t1 CREATE TABLE `t1` (
`TIME_MS` decimal(22,3) NOT NULL DEFAULT '0.000',
`STAGE` tinyint(2) NOT NULL DEFAULT '0',
`MAX_STAGE` tinyint(2) NOT NULL DEFAULT '0',
- `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000'
+ `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000',
+ `MEMORY_USED` int(7) NOT NULL DEFAULT '0',
+ `EXAMINED_ROWS` int(7) NOT NULL DEFAULT '0'
) DEFAULT CHARSET=utf8
drop table t1;
create temporary table t1 like information_schema.processlist;
@@ -1780,7 +1782,9 @@ t1 CREATE TEMPORARY TABLE `t1` (
`TIME_MS` decimal(22,3) NOT NULL DEFAULT '0.000',
`STAGE` tinyint(2) NOT NULL DEFAULT '0',
`MAX_STAGE` tinyint(2) NOT NULL DEFAULT '0',
- `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000'
+ `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000',
+ `MEMORY_USED` int(7) NOT NULL DEFAULT '0',
+ `EXAMINED_ROWS` int(7) NOT NULL DEFAULT '0'
) DEFAULT CHARSET=utf8
drop table t1;
create table t1 like information_schema.character_sets;
diff --git a/mysql-test/r/dyncol.result b/mysql-test/r/dyncol.result
index 13543223ad8..925fb38a0f8 100644
--- a/mysql-test/r/dyncol.result
+++ b/mysql-test/r/dyncol.result
@@ -1088,7 +1088,7 @@ column_list(column_add(column_create(1, 1), 1, null))
select column_list(column_add(column_create(1, 1), 1, ""));
column_list(column_add(column_create(1, 1), 1, ""))
-1
+`1`
select hex(column_add("", 1, 1));
hex(column_add("", 1, 1))
00010001000002
@@ -1133,10 +1133,10 @@ column_exists(column_create(1, 1212 as integer, 2, 1212 as integer), 4)
# column list
select column_list(column_create(1, 1212 as integer, 2, 1212 as integer));
column_list(column_create(1, 1212 as integer, 2, 1212 as integer))
-1,2
+`1`,`2`
select column_list(column_create(1, 1212 as integer));
column_list(column_create(1, 1212 as integer))
-1
+`1`
select column_list(column_create(1, NULL as integer));
column_list(column_create(1, NULL as integer))
@@ -1218,35 +1218,35 @@ sum(column_get(str, 1 as int))
11
select id, column_list(str) from t1 where id= 5;
id column_list(str)
-5 1,2,3,10
+5 `1`,`2`,`3`,`10`
update t1 set str=column_delete(str, 3, 4, 2) where id= 5;
select id, length(str), column_list(str), column_get(str, 1 as int), column_get(str, 2 as char), column_get(str, 3 as int) from t1;
id length(str) column_list(str) column_get(str, 1 as int) column_get(str, 2 as char) column_get(str, 3 as int)
-1 12 1,2 1 a NULL
-2 12 1,2 2 a NULL
-3 12 2,3 NULL c 100
-4 16 1,2,3 5 c 100
-5 15 1,10 6 NULL NULL
-6 21 2,3,10 NULL c 100
+1 12 `1`,`2` 1 a NULL
+2 12 `1`,`2` 2 a NULL
+3 12 `2`,`3` NULL c 100
+4 16 `1`,`2`,`3` 5 c 100
+5 15 `1`,`10` 6 NULL NULL
+6 21 `2`,`3`,`10` NULL c 100
update t1 set str=column_add(str, 4, 45 as char, 2, 'c') where id= 5;
select id, length(str), column_list(str), column_get(str, 1 as int), column_get(str, 2 as char), column_get(str, 3 as int) from t1 where id = 5;
id length(str) column_list(str) column_get(str, 1 as int) column_get(str, 2 as char) column_get(str, 3 as int)
-5 26 1,2,4,10 6 c NULL
+5 26 `1`,`2`,`4`,`10` 6 c NULL
select id, length(str), column_list(str), column_exists(str, 4) from t1;
id length(str) column_list(str) column_exists(str, 4)
-1 12 1,2 0
-2 12 1,2 0
-3 12 2,3 0
-4 16 1,2,3 0
-5 26 1,2,4,10 1
-6 21 2,3,10 0
+1 12 `1`,`2` 0
+2 12 `1`,`2` 0
+3 12 `2`,`3` 0
+4 16 `1`,`2`,`3` 0
+5 26 `1`,`2`,`4`,`10` 1
+6 21 `2`,`3`,`10` 0
select sum(column_get(str, 1 as int)), column_list(str) from t1 group by 2;
sum(column_get(str, 1 as int)) column_list(str)
-3 1,2
-5 1,2,3
-6 1,2,4,10
-NULL 2,3
-NULL 2,3,10
+3 `1`,`2`
+5 `1`,`2`,`3`
+6 `1`,`2`,`4`,`10`
+NULL `2`,`3`
+NULL `2`,`3`,`10`
select id, hex(str) from t1;
id hex(str)
1 00020001000002000B020861
@@ -1282,11 +1282,11 @@ id
5
select id, column_list(str), length(str) from t1 where id=5;
id column_list(str) length(str)
-5 1,2,4,5,10 100048
+5 `1`,`2`,`4`,`5`,`10` 100048
update t1 set str=column_delete(str, 5) where id=5;
select id, column_list(str), length(str) from t1 where id=5;
id column_list(str) length(str)
-5 1,2,4,10 34
+5 `1`,`2`,`4`,`10` 34
drop table t1;
#
# LP#778905: Assertion `value->year <= 9999' failed in
@@ -1306,7 +1306,7 @@ INSERT INTO t1 SET f1 = COLUMN_CREATE( 2 , 'cde' );
SELECT HEX(COLUMN_ADD(f1, 1, 'abc')), COLUMN_LIST(f1) FROM t1;
HEX(COLUMN_ADD(f1, 1, 'abc')) COLUMN_LIST(f1)
NULL NULL
-0002000100030200230861626308636465 2
+0002000100030200230861626308636465 `2`
SELECT COLUMN_ADD(f1, 1, 'abc'), COLUMN_LIST(f1) FROM t1;
DROP TABLE t1;
#
@@ -1335,3 +1335,305 @@ hex(COLUMN_CREATE(0, COLUMN_GET(COLUMN_CREATE(0, 0.0 as decimal), 0 as decimal))
select hex(COLUMN_CREATE(0, 0.0 as decimal));
hex(COLUMN_CREATE(0, 0.0 as decimal))
000100000004
+#
+# test of symbolic names
+#
+# creation test (names)
+set names utf8;
+select hex(column_create("адын", 1212));
+hex(column_create("адын", 1212))
+040100080000000000D0B0D0B4D18BD0BD7809
+select hex(column_create("1212", 1212));
+hex(column_create("1212", 1212))
+040100040000000000313231327809
+select hex(column_create(1212, 2, "www", 3));
+hex(column_create(1212, 2, "www", 3))
+04020007000000000003001000777777313231320604
+select hex(column_create("1212", 2, "www", 3));
+hex(column_create("1212", 2, "www", 3))
+04020007000000000003001000777777313231320604
+select hex(column_create("1212", 2, 3, 3));
+hex(column_create("1212", 2, 3, 3))
+0402000500000000000100100033313231320604
+select hex(column_create("1212", 2, "адын", 1, 3, 3));
+hex(column_create("1212", 2, "адын", 1, 3, 3))
+0403000D000000000001001000050020003331323132D0B0D0B4D18BD0BD060402
+set names default;
+# fetching column test (names)
+set names utf8;
+select column_get(column_create("адын", 1212), "адын" as int);
+column_get(column_create("адын", 1212), "адын" as int)
+1212
+select column_get(column_create("1212", 2, "адын", 1, 3, 3), "адын" as int);
+column_get(column_create("1212", 2, "адын", 1, 3, 3), "адын" as int)
+1
+select column_get(column_create("1212", 2, "адын", 1, 3, 3), 1212 as int);
+column_get(column_create("1212", 2, "адын", 1, 3, 3), 1212 as int)
+2
+select column_get(column_create("1212", 2, "адын", 1, 3, 3), "3" as int);
+column_get(column_create("1212", 2, "адын", 1, 3, 3), "3" as int)
+3
+select column_get(column_create("1212", 2, "адын", 1, 3, 3), 3 as int);
+column_get(column_create("1212", 2, "адын", 1, 3, 3), 3 as int)
+3
+select column_get(column_create("1212", 2, "адын", 1, 3, 3), 4 as int);
+column_get(column_create("1212", 2, "адын", 1, 3, 3), 4 as int)
+NULL
+select column_get(column_create("1212", 2, "адын", 1, 3, 3), "4" as int);
+column_get(column_create("1212", 2, "адын", 1, 3, 3), "4" as int)
+NULL
+set names default;
+# column existence test (names)
+set names utf8;
+select column_exists(column_create("адын", 1212), "адын");
+column_exists(column_create("адын", 1212), "адын")
+1
+select column_exists(column_create("адын", 1212), "aады");
+column_exists(column_create("адын", 1212), "aады")
+0
+select column_exists(column_create("1212", 2, "адын", 1, 3, 3), "адын");
+column_exists(column_create("1212", 2, "адын", 1, 3, 3), "адын")
+1
+select column_exists(column_create("1212", 2, "адын", 1, 3, 3), 1212);
+column_exists(column_create("1212", 2, "адын", 1, 3, 3), 1212)
+1
+select column_exists(column_create("1212", 2, "адын", 1, 3, 3), "3");
+column_exists(column_create("1212", 2, "адын", 1, 3, 3), "3")
+1
+select column_exists(column_create("1212", 2, "адын", 1, 3, 3), 3);
+column_exists(column_create("1212", 2, "адын", 1, 3, 3), 3)
+1
+select column_exists(column_create("1212", 2, "адын", 1, 3, 3), 4);
+column_exists(column_create("1212", 2, "адын", 1, 3, 3), 4)
+0
+select column_exists(column_create("1212", 2, "адын", 1, 3, 3), "4");
+column_exists(column_create("1212", 2, "адын", 1, 3, 3), "4")
+0
+set names default;
+# column changing test (names)
+select hex(column_add(column_create(1, "AAA"), "b", "BBB"));
+hex(column_add(column_create(1, "AAA"), "b", "BBB"))
+0402000200000003000100430031620841414108424242
+select hex(column_add(column_create("1", "AAA"), "b", "BBB"));
+hex(column_add(column_create("1", "AAA"), "b", "BBB"))
+0402000200000003000100430031620841414108424242
+select column_get(column_add(column_create(1, "AAA"), "b", "BBB"), 1 as char);
+column_get(column_add(column_create(1, "AAA"), "b", "BBB"), 1 as char)
+AAA
+select column_get(column_add(column_create(1, "AAA"), "b", "BBB"), "b" as char);
+column_get(column_add(column_create(1, "AAA"), "b", "BBB"), "b" as char)
+BBB
+select hex(column_add(column_create("a", "AAA"), 1, "BBB"));
+hex(column_add(column_create("a", "AAA"), 1, "BBB"))
+0402000200000003000100430031610842424208414141
+select hex(column_add(column_create("a", "AAA"), "1", "BBB"));
+hex(column_add(column_create("a", "AAA"), "1", "BBB"))
+0402000200000003000100430031610842424208414141
+select hex(column_add(column_create("a", 1212 as integer), "b", "1212" as integer));
+hex(column_add(column_create("a", 1212 as integer), "b", "1212" as integer))
+04020002000000000001002000616278097809
+select hex(column_add(column_create("a", 1212 as integer), "a", "1212" as integer));
+hex(column_add(column_create("a", 1212 as integer), "a", "1212" as integer))
+040100010000000000617809
+select hex(column_add(column_create("a", 1212 as integer), "a", NULL as integer));
+hex(column_add(column_create("a", 1212 as integer), "a", NULL as integer))
+0400000000
+select hex(column_add(column_create("a", 1212 as integer), "b", NULL as integer));
+hex(column_add(column_create("a", 1212 as integer), "b", NULL as integer))
+040100010000000000617809
+select hex(column_add(column_create("a", 1212 as integer), "b", 1212 as integer, "a", 11 as integer));
+hex(column_add(column_create("a", 1212 as integer), "b", 1212 as integer, "a", 11 as integer))
+040200020000000000010010006162167809
+select column_get(column_add(column_create("a", 1212 as integer), "b", 1212 as integer, "a", 11 as integer), "a" as integer);
+column_get(column_add(column_create("a", 1212 as integer), "b", 1212 as integer, "a", 11 as integer), "a" as integer)
+11
+select column_get(column_add(column_create("a", 1212 as integer), "b", 1212 as integer, "a", 11 as integer), "b" as integer);
+column_get(column_add(column_create("a", 1212 as integer), "b", 1212 as integer, "a", 11 as integer), "b" as integer)
+1212
+select hex(column_add(column_create("a", 1212 as integer), "a", 1212 as integer, "b", 11 as integer));
+hex(column_add(column_create("a", 1212 as integer), "a", 1212 as integer, "b", 11 as integer))
+040200020000000000010020006162780916
+select hex(column_add(column_create("a", NULL as integer), "a", 1212 as integer, "b", 11 as integer));
+hex(column_add(column_create("a", NULL as integer), "a", 1212 as integer, "b", 11 as integer))
+040200020000000000010020006162780916
+select hex(column_add(column_create("a", 1212 as integer, "b", 1212 as integer), "a", 11 as integer));
+hex(column_add(column_create("a", 1212 as integer, "b", 1212 as integer), "a", 11 as integer))
+040200020000000000010010006162167809
+select hex(column_add(column_create("a", 1), "a", null));
+hex(column_add(column_create("a", 1), "a", null))
+0400000000
+select column_list(column_add(column_create("a", 1), "a", null));
+column_list(column_add(column_create("a", 1), "a", null))
+
+select column_list(column_add(column_create("a", 1), "a", ""));
+column_list(column_add(column_create("a", 1), "a", ""))
+`a`
+select hex(column_add("", "a", 1));
+hex(column_add("", "a", 1))
+0401000100000000006102
+# column delete (names)
+select hex(column_delete(column_create("a", 1212 as integer, "b", 1212 as integer), "a"));
+hex(column_delete(column_create("a", 1212 as integer, "b", 1212 as integer), "a"))
+040100010000000000627809
+select hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "b"));
+hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "b"))
+0402000200000000000100100061630206
+select hex(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer));
+hex(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer))
+0403000300000000000100100002002000616263020406
+select hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "c"));
+hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "c"))
+0402000200000000000100100061620204
+select hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "d"));
+hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "d"))
+0403000300000000000100100002002000616263020406
+select hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "b", "a"));
+hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "b", "a"))
+0401000100000000006306
+select hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "b", "c"));
+hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "b", "c"))
+0401000100000000006102
+select hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "a", "b", "c"));
+hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "a", "b", "c"))
+0400000000
+select hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "a", "b", "c", "e"));
+hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "a", "b", "c", "e"))
+0400000000
+select hex(column_delete(column_create("a", 1), "a"));
+hex(column_delete(column_create("a", 1), "a"))
+0400000000
+select hex(column_delete("", "a"));
+hex(column_delete("", "a"))
+
+#
+# MDEV-458 DNAMES: Server crashes on using an unquoted string
+# as a dynamic column name
+#
+select COLUMN_CREATE(color, "black");
+ERROR 42S22: Unknown column 'color' in 'field list'
+#
+# MDEV-489 Assertion `offset < 0x1f' failed in
+# type_and_offset_store on COLUMN_ADD
+#
+CREATE TABLE t1 (f1 tinyblob);
+INSERT INTO t1 VALUES (COLUMN_CREATE('col1', REPEAT('a',30)));
+select column_check(f1) from t1;
+column_check(f1)
+1
+UPDATE t1 SET f1 = COLUMN_ADD( f1, REPEAT('b',211), 'val2' );
+Warnings:
+Warning 1265 Data truncated for column 'f1' at row 1
+select column_check(f1) from t1;
+column_check(f1)
+0
+UPDATE t1 SET f1 = COLUMN_ADD( f1, REPEAT('c',211), 'val3' );
+Warnings:
+Warning 1265 Data truncated for column 'f1' at row 1
+select column_check(f1) from t1;
+column_check(f1)
+0
+drop table t1;
+#
+# MDEV-490/MDEV-491 null as arguments
+#
+SELECT COLUMN_GET( COLUMN_CREATE( 'col', 'val' ), NULL AS CHAR );
+COLUMN_GET( COLUMN_CREATE( 'col', 'val' ), NULL AS CHAR )
+NULL
+SELECT COLUMN_GET( NULL, 'col' as char );
+COLUMN_GET( NULL, 'col' as char )
+NULL
+SELECT COLUMN_EXISTS( COLUMN_CREATE( 'col', 'val' ), NULL);
+COLUMN_EXISTS( COLUMN_CREATE( 'col', 'val' ), NULL)
+NULL
+SELECT COLUMN_EXISTS( NULL, 'col');
+COLUMN_EXISTS( NULL, 'col')
+NULL
+SELECT COLUMN_CREATE( NULL, 'val' );
+COLUMN_CREATE( NULL, 'val' )
+NULL
+SELECT COLUMN_ADD( NULL, 'val', 'col');
+COLUMN_ADD( NULL, 'val', 'col')
+NULL
+#
+# MDEV-488: Assertion `column_name->length < 255' failed on a
+# column name with length 255 (precisely)
+#
+SELECT hex(COLUMN_CREATE(REPEAT('a',255),1));
+hex(COLUMN_CREATE(REPEAT('a',255),1))
+040100FF000000000061616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616161616102
+SELECT hex(COLUMN_CREATE(REPEAT('a',65536),1));
+ERROR 22007: Illegal value used as argument of dynamic column function
+#
+# JSON conversion
+#
+select column_json(column_create("int", -1212 as int, "uint", 12334 as unsigned int, "decimal", "23.344" as decimal, "double", 1.23444e50 as double, "string", 'gdgd\\dhdjh"dhdhd' as char, "time", "0:45:49.000001" AS time, "datetime", "2011-04-05 0:45:49.000001" AS datetime, "date", "2011-04-05" AS date));
+column_json(column_create("int", -1212 as int, "uint", 12334 as unsigned int, "decimal", "23.344" as decimal, "double", 1.23444e50 as double, "string", 'gdgd\\dhdjh"dhdhd' as char, "time", "0:45:49.000001" AS time, "datetime", "2011-04-05 0:45:49.000001"
+{"int":-1212,"date":"2011-04-05","time":"00:45:49.000001","uint":12334,"double":"1.2e50","string":"gdgd\\dhdjh\"dhdhd","decimal":23.344,"datetime":"2011-04-05 00:45:49.000001"}
+select column_json(column_create(1, -1212 as int, 2, 12334 as unsigned int, 3, "23.344" as decimal, 4, 1.23444e50 as double, 5, 'gdgd\\dhdjh"dhdhd' as char, 6, "0:45:49.000001" AS time, 7, "2011-04-05 0:45:49.000001" AS datetime, 8, "2011-04-05" AS date));
+column_json(column_create(1, -1212 as int, 2, 12334 as unsigned int, 3, "23.344" as decimal, 4, 1.23444e50 as double, 5, 'gdgd\\dhdjh"dhdhd' as char, 6, "0:45:49.000001" AS time, 7, "2011-04-05 0:45:49.000001" AS datetime, 8, "2011-04-05" AS date))
+{"1":-1212,"2":12334,"3":23.344,"4":"1.2e50","5":"gdgd\\dhdjh\"dhdhd","6":"00:45:49.000001","7":"2011-04-05 00:45:49.000001","8":"2011-04-05"}
+#
+# CHECK test
+#
+SELECT COLUMN_CHECK(COLUMN_CREATE(1,'a'));
+COLUMN_CHECK(COLUMN_CREATE(1,'a'))
+1
+SELECT COLUMN_CHECK('abracadabra');
+COLUMN_CHECK('abracadabra')
+0
+SELECT COLUMN_CHECK('');
+COLUMN_CHECK('')
+1
+SELECT COLUMN_CHECK(NULL);
+COLUMN_CHECK(NULL)
+NULL
+#
+# escaping check
+#
+select column_json(column_create("string", "'\"/\\`.,whatever")),hex(column_create("string", "'\"/\\`.,whatever"));
+column_json(column_create("string", "'\"/\\`.,whatever")) hex(column_create("string", "'\"/\\`.,whatever"))
+{"string":"'\"/\\`.,whatever"} 040100060000000300737472696E670827222F5C602E2C7768617465766572
+#
+# embedding test
+#
+select column_json(column_create("val", "val", "emb", column_create("val2", "val2")));
+column_json(column_create("val", "val", "emb", column_create("val2", "val2")))
+{"emb":{"val2":"val2"},"val":"val"}
+select column_json(column_create(1, "val", 2, column_create(3, "val2")));
+column_json(column_create(1, "val", 2, column_create(3, "val2")))
+{"1":"val","2":{"3":"val2"}}
+#
+# Time encoding
+#
+select hex(column_create("t", "800:46:06.23434" AS time)) as hex,
+column_json(column_create("t", "800:46:06.23434" AS time)) as json;
+hex json
+04010001000000070074649363B82003 {"t":"800:46:06.234340"}
+select hex(column_create(1, "800:46:06.23434" AS time)) as hex,
+column_json(column_create(1, "800:46:06.23434" AS time)) as json;
+hex json
+000100010007649363B82003 {"1":"800:46:06.234340"}
+select hex(column_create("t", "800:46:06" AS time)) as hex,
+column_json(column_create("t", "800:46:06" AS time)) as json;
+hex json
+04010001000000070074860B32 {"t":"800:46:06"}
+select hex(column_create(1, "800:46:06" AS time)) as hex,
+column_json(column_create(1, "800:46:06" AS time)) as json;
+hex json
+000100010007000060B82003 {"1":"800:46:06"}
+select hex(column_create("t", "2012-12-21 10:46:06.23434" AS datetime)) as hex,
+column_json(column_create("t", "2012-12-21 10:46:06.23434" AS datetime)) as json;
+hex json
+0401000100000005007495B90F649363B80A00 {"t":"2012-12-21 10:46:06.234340"}
+select hex(column_create(1, "2012-12-21 10:46:06.23434" AS datetime)) as hex,
+column_json(column_create(1, "2012-12-21 10:46:06.23434" AS datetime)) as json;
+hex json
+00010001000595B90F649363B80A00 {"1":"2012-12-21 10:46:06.234340"}
+select hex(column_create("t", "2012-12-21 10:46:06" AS datetime)) as hex,
+column_json(column_create("t", "2012-12-21 10:46:06" AS datetime)) as json;
+hex json
+0401000100000005007495B90F86AB00 {"t":"2012-12-21 10:46:06"}
+select hex(column_create(1, "2012-12-21 10:46:06" AS datetime)) as hex,
+column_json(column_create(1, "2012-12-21 10:46:06" AS datetime)) as json;
+hex json
+00010001000595B90F000060B80A00 {"1":"2012-12-21 10:46:06"}
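+#
+# Editor's sketch (not part of the original commit): the packed values
+# above are assumed to round-trip through COLUMN_GET, e.g.
+#   select column_get(column_create("t", "800:46:06.23434" AS time),
+#                     "t" as time);
+# would be expected to return 800:46:06.234340, matching the JSON output.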
diff --git a/mysql-test/r/filesort_debug.result b/mysql-test/r/filesort_debug.result
index 6d6eb817259..13912ac8756 100644
--- a/mysql-test/r/filesort_debug.result
+++ b/mysql-test/r/filesort_debug.result
@@ -4,7 +4,7 @@ SET @old_debug= @@session.debug;
#
CREATE TABLE t1(f0 int auto_increment primary key, f1 int);
INSERT INTO t1(f1) VALUES (0),(1),(2),(3),(4),(5);
-SET session debug_dbug= '+d,make_sort_keys_alloc_fail';
+SET session debug_dbug= '+d,alloc_sort_buffer_fail';
CALL mtr.add_suppression("Out of sort memory");
SELECT * FROM t1 ORDER BY f1 ASC, f0;
ERROR HY001: Out of sort memory, consider increasing server sort buffer size
diff --git a/mysql-test/r/flush.result b/mysql-test/r/flush.result
index d3b3cd16210..b64351045bf 100644
--- a/mysql-test/r/flush.result
+++ b/mysql-test/r/flush.result
@@ -489,3 +489,10 @@ UNLOCK TABLES;
# Switching to connection 'default'.
COMMIT;
DROP TABLE t1;
+#
+# Test flushing slave or relay logs twice
+#
+flush relay logs,relay logs;
+ERROR HY000: Incorrect usage of FLUSH and RELAY LOGS
+flush slave,slave;
+ERROR HY000: Incorrect usage of FLUSH and SLAVE
diff --git a/mysql-test/r/func_system.result b/mysql-test/r/func_system.result
index fa05021eb47..c0d63200f82 100644
--- a/mysql-test/r/func_system.result
+++ b/mysql-test/r/func_system.result
@@ -25,14 +25,14 @@ user() like _latin1"%@%"
select charset(user());
charset(user())
utf8
-select version()>="3.23.29";
-version()>="3.23.29"
+select version()>="03.23.29";
+version()>="03.23.29"
1
-select version()>=_utf8"3.23.29";
-version()>=_utf8"3.23.29"
+select version()>=_utf8"03.23.29";
+version()>=_utf8"03.23.29"
1
-select version()>=_latin1"3.23.29";
-version()>=_latin1"3.23.29"
+select version()>=_latin1"03.23.29";
+version()>=_latin1"03.23.29"
1
select charset(version());
charset(version())
diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result
index a5491c9c839..dd4b73b0b05 100644
--- a/mysql-test/r/func_time.result
+++ b/mysql-test/r/func_time.result
@@ -1023,11 +1023,11 @@ MAKETIME(CAST(-1 AS UNSIGNED), 0, 0)
Warnings:
Note 1105 Cast to unsigned converted negative integer to it's positive complement
Warning 1292 Truncated incorrect time value: '18446744073709551615:00:00'
-SELECT EXTRACT(HOUR FROM '100000:02:03');
-EXTRACT(HOUR FROM '100000:02:03')
+SELECT EXTRACT(HOUR FROM '10000:02:03');
+EXTRACT(HOUR FROM '10000:02:03')
838
Warnings:
-Warning 1292 Truncated incorrect time value: '100000:02:03'
+Warning 1292 Truncated incorrect time value: '10000:02:03'
CREATE TABLE t1(f1 TIME);
INSERT INTO t1 VALUES('916:00:00 a');
Warnings:
diff --git a/mysql-test/r/function_defaults.result b/mysql-test/r/function_defaults.result
new file mode 100644
index 00000000000..27b9ee0a323
--- /dev/null
+++ b/mysql-test/r/function_defaults.result
@@ -0,0 +1,3067 @@
+#
+# Test of function defaults for any server, including embedded.
+#
+#
+# Function defaults run 1. No microsecond precision.
+#
+SET TIME_ZONE = "+00:00";
+#
+# Test of errors for column data types that don't support function
+# defaults.
+#
+CREATE TABLE t1( a BIT DEFAULT CURRENT_TIMESTAMP );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a TINYINT DEFAULT CURRENT_TIMESTAMP );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a SMALLINT DEFAULT CURRENT_TIMESTAMP );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a MEDIUMINT DEFAULT CURRENT_TIMESTAMP );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a INT DEFAULT CURRENT_TIMESTAMP );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a BIGINT DEFAULT CURRENT_TIMESTAMP );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a FLOAT DEFAULT CURRENT_TIMESTAMP );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a DECIMAL DEFAULT CURRENT_TIMESTAMP );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a DATE DEFAULT CURRENT_TIMESTAMP );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a TIME DEFAULT CURRENT_TIMESTAMP );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a YEAR DEFAULT CURRENT_TIMESTAMP );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a BIT ON UPDATE CURRENT_TIMESTAMP );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a TINYINT ON UPDATE CURRENT_TIMESTAMP );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a SMALLINT ON UPDATE CURRENT_TIMESTAMP );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a MEDIUMINT ON UPDATE CURRENT_TIMESTAMP );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a INT ON UPDATE CURRENT_TIMESTAMP );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a BIGINT ON UPDATE CURRENT_TIMESTAMP );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a FLOAT ON UPDATE CURRENT_TIMESTAMP );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a DECIMAL ON UPDATE CURRENT_TIMESTAMP );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a DATE ON UPDATE CURRENT_TIMESTAMP );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a TIME ON UPDATE CURRENT_TIMESTAMP );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a YEAR ON UPDATE CURRENT_TIMESTAMP );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+#
+# Test that the default clause behaves like NOW() regarding time zones.
+#
+CREATE TABLE t1 (
+a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+c TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+d TIMESTAMP NULL,
+e DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+f DATETIME DEFAULT CURRENT_TIMESTAMP,
+g DATETIME ON UPDATE CURRENT_TIMESTAMP,
+h DATETIME
+);
+# 2011-09-27 14:11:08 UTC
+SET TIMESTAMP = 1317132668.654321;
+SET @old_time_zone = @@TIME_ZONE;
+SET TIME_ZONE = "+05:00";
+INSERT INTO t1( d, h ) VALUES ( NOW(), NOW() );
+SELECT * FROM t1;
+a b c d e f g h
+2011-09-27 19:11:08 2011-09-27 19:11:08 0000-00-00 00:00:00 2011-09-27 19:11:08 2011-09-27 19:11:08 2011-09-27 19:11:08 NULL 2011-09-27 19:11:08
+# 1989-05-12 23:02:03 UTC
+SET TIMESTAMP = 611017323.543212;
+UPDATE t1 SET d = NOW(), h = NOW();
+SELECT * FROM t1;
+a b c d e f g h
+1989-05-13 04:02:03 2011-09-27 19:11:08 1989-05-13 04:02:03 1989-05-13 04:02:03 1989-05-13 04:02:03 2011-09-27 19:11:08 1989-05-13 04:02:03 1989-05-13 04:02:03
+SET TIME_ZONE = @old_time_zone;
+DROP TABLE t1;
+#
+# Test of several TIMESTAMP columns with different function defaults.
+#
+CREATE TABLE t1 (
+a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+c TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+d TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+e TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+f INT
+);
+# 2011-04-19 07:22:02 UTC
+SET TIMESTAMP = 1303197722.534231;
+INSERT INTO t1 ( f ) VALUES (1);
+SELECT * FROM t1;
+a b c d e f
+2011-04-19 07:22:02 2011-04-19 07:22:02 2011-04-19 07:22:02 0000-00-00 00:00:00 0000-00-00 00:00:00 1
+# 2011-04-19 07:23:18 UTC
+SET TIMESTAMP = 1303197798.132435;
+UPDATE t1 SET f = 2;
+SELECT * FROM t1;
+a b c d e f
+2011-04-19 07:23:18 2011-04-19 07:23:18 2011-04-19 07:22:02 2011-04-19 07:23:18 2011-04-19 07:23:18 2
+DROP TABLE t1;
+#
+# Test of inserted values out of order.
+#
+CREATE TABLE t1 (
+a INT,
+b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+c TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+d TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+e TIMESTAMP NULL,
+f DATETIME,
+g DATETIME DEFAULT CURRENT_TIMESTAMP,
+h DATETIME ON UPDATE CURRENT_TIMESTAMP,
+i DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+j INT
+);
+# 2011-04-19 07:22:02 UTC
+SET TIMESTAMP = 1303197722.534231;
+INSERT INTO t1 ( j, a ) VALUES ( 1, 1 );
+SELECT * FROM t1;
+a b c d e f g h i j
+1 2011-04-19 07:22:02 2011-04-19 07:22:02 0000-00-00 00:00:00 NULL NULL 2011-04-19 07:22:02 NULL 2011-04-19 07:22:02 1
+DROP TABLE t1;
+#
+# Test of ON DUPLICATE KEY UPDATE
+#
+CREATE TABLE t1 (
+a INT PRIMARY KEY,
+b INT,
+c TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+d TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+e TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+f TIMESTAMP NOT NULL DEFAULT '1986-09-27 03:00:00.098765',
+g TIMESTAMP NULL,
+h DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+i DATETIME DEFAULT CURRENT_TIMESTAMP,
+j DATETIME ON UPDATE CURRENT_TIMESTAMP,
+k DATETIME NULL,
+l DATETIME DEFAULT '1986-09-27 03:00:00.098765'
+);
+# 1977-12-21 23:00:00 UTC
+SET TIMESTAMP = 251593200.192837;
+INSERT INTO t1(a) VALUES (1) ON DUPLICATE KEY UPDATE b = 2;
+SELECT * FROM t1;
+a b c d e f g h i j k l
+1 NULL 1977-12-21 23:00:00 1977-12-21 23:00:00 0000-00-00 00:00:00 1986-09-27 03:00:00 NULL 1977-12-21 23:00:00 1977-12-21 23:00:00 NULL NULL 1986-09-27 03:00:00
+# 1975-05-21 23:00:00 UTC
+SET TIMESTAMP = 169945200.918273;
+INSERT INTO t1(a) VALUES (1) ON DUPLICATE KEY UPDATE b = 2;
+SELECT * FROM t1;
+a b c d e f g h i j k l
+1 2 1975-05-21 23:00:00 1977-12-21 23:00:00 1975-05-21 23:00:00 1986-09-27 03:00:00 NULL 1975-05-21 23:00:00 1977-12-21 23:00:00 1975-05-21 23:00:00 NULL 1986-09-27 03:00:00
+# 1973-08-14 09:11:22 UTC
+SET TIMESTAMP = 114167482.534231;
+INSERT INTO t1(a) VALUES (2) ON DUPLICATE KEY UPDATE b = 2;
+SELECT * FROM t1;
+a b c d e f g h i j k l
+1 2 1975-05-21 23:00:00 1977-12-21 23:00:00 1975-05-21 23:00:00 1986-09-27 03:00:00 NULL 1975-05-21 23:00:00 1977-12-21 23:00:00 1975-05-21 23:00:00 NULL 1986-09-27 03:00:00
+2 NULL 1973-08-14 09:11:22 1973-08-14 09:11:22 0000-00-00 00:00:00 1986-09-27 03:00:00 NULL 1973-08-14 09:11:22 1973-08-14 09:11:22 NULL NULL 1986-09-27 03:00:00
+DROP TABLE t1;
+CREATE TABLE t1 ( a INT PRIMARY KEY, b INT, c TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP );
+# 2011-04-19 07:23:18 UTC
+SET TIMESTAMP = 1303197798.945156;
+INSERT INTO t1 VALUES
+(1, 0, '2001-01-01 01:01:01.111111'),
+(2, 0, '2002-02-02 02:02:02.222222'),
+(3, 0, '2003-03-03 03:03:03.333333');
+SELECT * FROM t1;
+a b c
+1 0 2001-01-01 01:01:01
+2 0 2002-02-02 02:02:02
+3 0 2003-03-03 03:03:03
+UPDATE t1 SET b = 2, c = c WHERE a = 2;
+SELECT * FROM t1;
+a b c
+1 0 2001-01-01 01:01:01
+2 2 2002-02-02 02:02:02
+3 0 2003-03-03 03:03:03
+INSERT INTO t1 (a) VALUES (4);
+SELECT * FROM t1;
+a b c
+1 0 2001-01-01 01:01:01
+2 2 2002-02-02 02:02:02
+3 0 2003-03-03 03:03:03
+4 NULL 2011-04-19 07:23:18
+UPDATE t1 SET c = '2004-04-04 04:04:04.444444' WHERE a = 4;
+SELECT * FROM t1;
+a b c
+1 0 2001-01-01 01:01:01
+2 2 2002-02-02 02:02:02
+3 0 2003-03-03 03:03:03
+4 NULL 2004-04-04 04:04:04
+INSERT INTO t1 ( a ) VALUES ( 3 ), ( 5 ) ON DUPLICATE KEY UPDATE b = 3, c = c;
+SELECT * FROM t1;
+a b c
+1 0 2001-01-01 01:01:01
+2 2 2002-02-02 02:02:02
+3 3 2003-03-03 03:03:03
+4 NULL 2004-04-04 04:04:04
+5 NULL 2011-04-19 07:23:18
+INSERT INTO t1 (a, c) VALUES
+(4, '2004-04-04 00:00:00.444444'),
+(6, '2006-06-06 06:06:06.666666')
+ON DUPLICATE KEY UPDATE b = 4;
+SELECT * FROM t1;
+a b c
+1 0 2001-01-01 01:01:01
+2 2 2002-02-02 02:02:02
+3 3 2003-03-03 03:03:03
+4 4 2011-04-19 07:23:18
+5 NULL 2011-04-19 07:23:18
+6 NULL 2006-06-06 06:06:06
+DROP TABLE t1;
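+#
+# Editor's note (sketch, not from the original commit): as the results
+# above show, assigning a column to itself, e.g.
+#   UPDATE t1 SET b = 2, c = c WHERE a = 2;
+# suppresses the automatic ON UPDATE CURRENT_TIMESTAMP value and keeps
+# the stored timestamp; the same holds in the ON DUPLICATE KEY UPDATE
+# clause (b = 3, c = c above).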
+#
+# Test of REPLACE INTO executed as UPDATE.
+#
+CREATE TABLE t1 (
+a INT PRIMARY KEY,
+b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+c DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+d TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+e DATETIME DEFAULT CURRENT_TIMESTAMP,
+f TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+g DATETIME ON UPDATE CURRENT_TIMESTAMP,
+h TIMESTAMP NULL,
+i DATETIME
+);
+# 1970-09-21 09:11:12 UTC
+SET TIMESTAMP = 22756272.163584;
+REPLACE INTO t1 ( a ) VALUES ( 1 );
+SELECT * FROM t1;
+a b c d e f g h i
+1 1970-09-21 09:11:12 1970-09-21 09:11:12 1970-09-21 09:11:12 1970-09-21 09:11:12 0000-00-00 00:00:00 NULL NULL NULL
+# 1970-11-10 14:16:17 UTC
+SET TIMESTAMP = 27094577.852954;
+REPLACE INTO t1 ( a ) VALUES ( 1 );
+SELECT * FROM t1;
+a b c d e f g h i
+1 1970-11-10 14:16:17 1970-11-10 14:16:17 1970-11-10 14:16:17 1970-11-10 14:16:17 0000-00-00 00:00:00 NULL NULL NULL
+DROP TABLE t1;
+#
+# Test of insertion of NULL, DEFAULT and an empty row for DEFAULT
+# CURRENT_TIMESTAMP.
+#
+CREATE TABLE t1 (
+a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+b DATETIME DEFAULT CURRENT_TIMESTAMP,
+c INT
+);
+# 2011-04-20 09:53:41 UTC
+SET TIMESTAMP = 1303293221.163578;
+INSERT INTO t1 VALUES (NULL, NULL, 1), (DEFAULT, DEFAULT, 2);
+INSERT INTO t1 ( a, b, c ) VALUES (NULL, NULL, 3), (DEFAULT, DEFAULT, 4);
+SELECT * FROM t1;
+a b c
+2011-04-20 09:53:41 NULL 1
+2011-04-20 09:53:41 2011-04-20 09:53:41 2
+2011-04-20 09:53:41 NULL 3
+2011-04-20 09:53:41 2011-04-20 09:53:41 4
+SET TIME_ZONE = "+03:00";
+SELECT * FROM t1;
+a b c
+2011-04-20 12:53:41 NULL 1
+2011-04-20 12:53:41 2011-04-20 09:53:41 2
+2011-04-20 12:53:41 NULL 3
+2011-04-20 12:53:41 2011-04-20 09:53:41 4
+SET TIME_ZONE = "+00:00";
+DROP TABLE t1;
+# 2011-04-20 07:05:39 UTC
+SET TIMESTAMP = 1303283139.195624;
+CREATE TABLE t1 (
+a TIMESTAMP NOT NULL DEFAULT '2010-10-11 12:34:56' ON UPDATE CURRENT_TIMESTAMP,
+b DATETIME DEFAULT '2010-10-11 12:34:56'
+);
+INSERT INTO t1 VALUES (NULL, NULL), (DEFAULT, DEFAULT);
+INSERT INTO t1 ( a, b ) VALUES (NULL, NULL), (DEFAULT, DEFAULT);
+SELECT * FROM t1;
+a b
+2011-04-20 07:05:39 NULL
+2010-10-11 12:34:56 2010-10-11 12:34:56
+2011-04-20 07:05:39 NULL
+2010-10-11 12:34:56 2010-10-11 12:34:56
+DROP TABLE t1;
+# 2011-04-20 09:53:41 UTC
+SET TIMESTAMP = 1303293221.136952;
+CREATE TABLE t1 (
+a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+c TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+d TIMESTAMP NOT NULL DEFAULT '1986-09-27 03:00:00.098765',
+e TIMESTAMP NULL,
+f DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+g DATETIME DEFAULT CURRENT_TIMESTAMP,
+h DATETIME ON UPDATE CURRENT_TIMESTAMP,
+i DATETIME NULL,
+j DATETIME DEFAULT '1986-09-27 03:00:00.098765'
+);
+INSERT INTO t1 VALUES ();
+INSERT INTO t1 SELECT NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL;
+SELECT * FROM t1;
+a b c d e f g h i j
+2011-04-20 09:53:41 2011-04-20 09:53:41 0000-00-00 00:00:00 1986-09-27 03:00:00 NULL 2011-04-20 09:53:41 2011-04-20 09:53:41 NULL NULL 1986-09-27 03:00:00
+2011-04-20 09:53:41 2011-04-20 09:53:41 2011-04-20 09:53:41 2011-04-20 09:53:41 NULL NULL NULL NULL NULL NULL
+DROP TABLE t1;
+#
+# Test of multiple-table UPDATE for DEFAULT CURRENT_TIMESTAMP
+#
+CREATE TABLE t1 (
+a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+b DATETIME DEFAULT CURRENT_TIMESTAMP,
+c INT
+);
+INSERT INTO t1 ( c ) VALUES (1);
+SELECT * FROM t1;
+a b c
+2011-04-20 09:53:41 2011-04-20 09:53:41 1
+# 2011-04-20 15:06:13 UTC
+SET TIMESTAMP = 1303311973.163587;
+UPDATE t1 t11, t1 t12 SET t11.c = 1;
+SELECT * FROM t1;
+a b c
+2011-04-20 09:53:41 2011-04-20 09:53:41 1
+UPDATE t1 t11, t1 t12 SET t11.c = 2;
+SELECT * FROM t1;
+a b c
+2011-04-20 15:06:13 2011-04-20 09:53:41 2
+DROP TABLE t1;
+CREATE TABLE t1 (
+a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+b TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+c DATETIME DEFAULT CURRENT_TIMESTAMP,
+d DATETIME ON UPDATE CURRENT_TIMESTAMP,
+e INT
+);
+CREATE TABLE t2 (
+f INT,
+g DATETIME ON UPDATE CURRENT_TIMESTAMP,
+h DATETIME DEFAULT CURRENT_TIMESTAMP,
+i TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+j TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
+);
+# 1995-03-11 00:02:03 UTC
+SET TIMESTAMP = 794880123.195676;
+INSERT INTO t1 ( e ) VALUES ( 1 ), ( 2 );
+INSERT INTO t2 ( f ) VALUES ( 1 ), ( 2 );
+SELECT * FROM t1;
+a b c d e
+1995-03-11 00:02:03 0000-00-00 00:00:00 1995-03-11 00:02:03 NULL 1
+1995-03-11 00:02:03 0000-00-00 00:00:00 1995-03-11 00:02:03 NULL 2
+SELECT * FROM t2;
+f g h i j
+1 NULL 1995-03-11 00:02:03 0000-00-00 00:00:00 1995-03-11 00:02:03
+2 NULL 1995-03-11 00:02:03 0000-00-00 00:00:00 1995-03-11 00:02:03
+# 1980-12-13 02:02:01 UTC
+SET TIMESTAMP = 345520921.196755;
+UPDATE t1, t2 SET t1.e = 3, t2.f = 4;
+SELECT * FROM t1;
+a b c d e
+1995-03-11 00:02:03 1980-12-13 02:02:01 1995-03-11 00:02:03 1980-12-13 02:02:01 3
+1995-03-11 00:02:03 1980-12-13 02:02:01 1995-03-11 00:02:03 1980-12-13 02:02:01 3
+SELECT * FROM t2;
+f g h i j
+4 1980-12-13 02:02:01 1995-03-11 00:02:03 1980-12-13 02:02:01 1995-03-11 00:02:03
+4 1980-12-13 02:02:01 1995-03-11 00:02:03 1980-12-13 02:02:01 1995-03-11 00:02:03
+DROP TABLE t1, t2;
+#
+# Test of multiple-table update with a temporary table and on the fly.
+#
+CREATE TABLE t1 (
+a TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+b DATETIME ON UPDATE CURRENT_TIMESTAMP,
+c INT,
+d INT
+);
+CREATE TABLE t2 (
+a TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+b DATETIME ON UPDATE CURRENT_TIMESTAMP,
+c INT KEY,
+d INT
+);
+INSERT INTO t1 ( c ) VALUES (1), (2);
+INSERT INTO t2 ( c ) VALUES (1), (2);
+# Test of multiple-table update done on the fly
+# 2011-04-20 15:06:13 UTC
+SET TIMESTAMP = 1303311973.194685;
+UPDATE t1 JOIN t2 USING ( c ) SET t2.d = 1;
+SELECT * FROM t1;
+a b c d
+0000-00-00 00:00:00 NULL 1 NULL
+0000-00-00 00:00:00 NULL 2 NULL
+SELECT * FROM t2;
+a b c d
+2011-04-20 15:06:13 2011-04-20 15:06:13 1 1
+2011-04-20 15:06:13 2011-04-20 15:06:13 2 1
+# Test of multiple-table update done with a temporary table.
+# 1979-01-15 02:02:01 UTC
+SET TIMESTAMP = 285213721.134679;
+UPDATE t1 JOIN t2 USING ( c ) SET t1.d = 1;
+SELECT * FROM t1;
+a b c d
+1979-01-15 02:02:01 1979-01-15 02:02:01 1 1
+1979-01-15 02:02:01 1979-01-15 02:02:01 2 1
+SELECT * FROM t2;
+a b c d
+2011-04-20 15:06:13 2011-04-20 15:06:13 1 1
+2011-04-20 15:06:13 2011-04-20 15:06:13 2 1
+DROP TABLE t1, t2;
+#
+# Test of ON UPDATE CURRENT_TIMESTAMP.
+#
+CREATE TABLE t1 (
+a TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+b DATETIME ON UPDATE CURRENT_TIMESTAMP,
+c INT
+);
+# 2011-04-20 09:53:41 UTC
+SET TIMESTAMP = 1303293221.794613;
+INSERT INTO t1 ( c ) VALUES ( 1 );
+SELECT * FROM t1;
+a b c
+0000-00-00 00:00:00 NULL 1
+UPDATE t1 SET c = 1;
+SELECT * FROM t1;
+a b c
+0000-00-00 00:00:00 NULL 1
+UPDATE t1 SET c = 2;
+SELECT * FROM t1;
+a b c
+2011-04-20 09:53:41 2011-04-20 09:53:41 2
+#
+# Test of multiple-table UPDATE for ON UPDATE CURRENT_TIMESTAMP
+#
+# 2011-04-20 15:06:13 UTC
+SET TIMESTAMP = 1303311973.534231;
+UPDATE t1 t11, t1 t12 SET t11.c = 2;
+SELECT * FROM t1;
+a b c
+2011-04-20 09:53:41 2011-04-20 09:53:41 2
+UPDATE t1 t11, t1 t12 SET t11.c = 3;
+SELECT * FROM t1;
+a b c
+2011-04-20 15:06:13 2011-04-20 15:06:13 3
+DROP TABLE t1;
+#
+# Test of a multiple-table update where only one table is updated and
+# the updated table has a primary key.
+#
+CREATE TABLE t1 ( a INT, b INT, PRIMARY KEY (a) );
+INSERT INTO t1 VALUES (1, 1),(2, 2),(3, 3),(4, 4);
+CREATE TABLE t2 ( a INT, b INT );
+INSERT INTO t2 VALUES (1, 1),(2, 2),(3, 3),(4, 4),(5, 5);
+UPDATE t1, t2 SET t1.b = 100 WHERE t1.a = t2.a;
+SELECT * FROM t1;
+a b
+1 100
+2 100
+3 100
+4 100
+SELECT * FROM t2;
+a b
+1 1
+2 2
+3 3
+4 4
+5 5
+DROP TABLE t1, t2;
+#
+# Test of ALTER TABLE, reordering columns.
+#
+CREATE TABLE t1 ( a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, b INT );
+ALTER TABLE t1 MODIFY a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP AFTER b;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `b` int(11) DEFAULT NULL,
+ `a` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 ( a INT, b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, c TIMESTAMP NULL );
+ALTER TABLE t1 MODIFY b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP FIRST;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `b` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `a` int(11) DEFAULT NULL,
+ `c` timestamp NULL DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 ( a INT, b TIMESTAMP NULL );
+ALTER TABLE t1 MODIFY b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP FIRST;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `b` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `a` int(11) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 ( a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, b TIMESTAMP NULL );
+ALTER TABLE t1 MODIFY a TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' AFTER b;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `b` timestamp NULL DEFAULT NULL,
+ `a` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 ( a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, b TIMESTAMP NULL );
+ALTER TABLE t1 MODIFY a TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' AFTER b;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `b` timestamp NULL DEFAULT NULL,
+ `a` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 ( a TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE NOW(), b INT, c TIMESTAMP NULL );
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+ `b` int(11) DEFAULT NULL,
+ `c` timestamp NULL DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+ALTER TABLE t1 MODIFY a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP AFTER b;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `b` int(11) DEFAULT NULL,
+ `a` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `c` timestamp NULL DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 ( a TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE NOW(), b INT, c TIMESTAMP NULL );
+ALTER TABLE t1 MODIFY c TIMESTAMP NULL FIRST;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` timestamp NULL DEFAULT NULL,
+ `a` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+ `b` int(11) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 ( a TIMESTAMP NOT NULL DEFAULT NOW() ON UPDATE CURRENT_TIMESTAMP, b INT, c TIMESTAMP NULL );
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `b` int(11) DEFAULT NULL,
+ `c` timestamp NULL DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+ALTER TABLE t1 MODIFY a TIMESTAMP NOT NULL DEFAULT NOW() ON UPDATE CURRENT_TIMESTAMP AFTER b;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `b` int(11) DEFAULT NULL,
+ `a` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `c` timestamp NULL DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 ( a TIMESTAMP NOT NULL DEFAULT NOW() ON UPDATE CURRENT_TIMESTAMP, b INT, c TIMESTAMP NULL );
+ALTER TABLE t1 MODIFY c TIMESTAMP NULL FIRST;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` timestamp NULL DEFAULT NULL,
+ `a` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `b` int(11) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
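+#
+# Editor's note (sketch, not from the original commit): NOW() is a
+# synonym for CURRENT_TIMESTAMP in these clauses, so a definition such as
+#   a TIMESTAMP NOT NULL DEFAULT NOW() ON UPDATE NOW()
+# is printed by SHOW CREATE TABLE with CURRENT_TIMESTAMP, as seen above.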
+#
+# Test of ALTER TABLE, adding columns.
+#
+CREATE TABLE t1 ( a INT );
+ALTER TABLE t1 ADD COLUMN b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+#
+# Test of INSERT SELECT.
+#
+CREATE TABLE t1 (
+a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+c DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+d DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
+);
+CREATE TABLE t2 (
+placeholder1 INT,
+placeholder2 INT,
+placeholder3 INT,
+placeholder4 INT,
+a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+b TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00',
+c DATETIME,
+d DATETIME
+);
+# 1977-08-16 15:30:01 UTC
+SET TIMESTAMP = 240589801.654312;
+INSERT INTO t2 (a, b, c, d) VALUES (
+'1977-08-16 15:30:01.123456',
+'1977-08-16 15:30:01.234567',
+'1977-08-16 15:30:01.345678',
+'1977-08-16 15:30:01.456789'
+);
+# 1986-09-27 01:00:00 UTC
+SET TIMESTAMP = 528166800.132435;
+INSERT INTO t1 ( a, c ) SELECT a, c FROM t2;
+SELECT * FROM t1;
+a b c d
+1977-08-16 15:30:01 1986-09-27 01:00:00 1977-08-16 15:30:01 1986-09-27 01:00:00
+DROP TABLE t1, t2;
+#
+# Test of CREATE TABLE SELECT.
+#
+# We test that the columns of the source table are not used to determine
+# function defaults for the receiving table.
+#
+# 1970-04-11 20:13:57 UTC
+SET TIMESTAMP = 8712837.657898;
+CREATE TABLE t1 (
+a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+c TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+d TIMESTAMP NOT NULL DEFAULT '1986-09-27 03:00:00.098765',
+e TIMESTAMP NULL,
+f DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+g DATETIME DEFAULT CURRENT_TIMESTAMP,
+h DATETIME ON UPDATE CURRENT_TIMESTAMP,
+i DATETIME NULL,
+j DATETIME DEFAULT '1986-09-27 03:00:00.098765'
+);
+INSERT INTO t1 VALUES ();
+# 1971-01-31 20:13:57 UTC
+SET TIMESTAMP = 34200837.164937;
+CREATE TABLE t2 SELECT a FROM t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a
+1970-04-11 20:13:57
+CREATE TABLE t3 SELECT b FROM t1;
+SHOW CREATE TABLE t3;
+Table Create Table
+t3 CREATE TABLE `t3` (
+ `b` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t3;
+b
+1970-04-11 20:13:57
+CREATE TABLE t4 SELECT c FROM t1;
+SHOW CREATE TABLE t4;
+Table Create Table
+t4 CREATE TABLE `t4` (
+ `c` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t4;
+c
+0000-00-00 00:00:00
+CREATE TABLE t5 SELECT d FROM t1;
+SHOW CREATE TABLE t5;
+Table Create Table
+t5 CREATE TABLE `t5` (
+ `d` timestamp NOT NULL DEFAULT '1986-09-27 03:00:00'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t5;
+d
+1986-09-27 03:00:00
+CREATE TABLE t6 SELECT e FROM t1;
+SHOW CREATE TABLE t6;
+Table Create Table
+t6 CREATE TABLE `t6` (
+ `e` timestamp NULL DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t6;
+e
+NULL
+CREATE TABLE t7 SELECT f FROM t1;
+SHOW CREATE TABLE t7;
+Table Create Table
+t7 CREATE TABLE `t7` (
+ `f` datetime DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t7;
+f
+1970-04-11 20:13:57
+CREATE TABLE t8 SELECT g FROM t1;
+SHOW CREATE TABLE t8;
+Table Create Table
+t8 CREATE TABLE `t8` (
+ `g` datetime DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t8;
+g
+1970-04-11 20:13:57
+CREATE TABLE t9 SELECT h FROM t1;
+SHOW CREATE TABLE t9;
+Table Create Table
+t9 CREATE TABLE `t9` (
+ `h` datetime DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t9;
+h
+NULL
+CREATE TABLE t10 SELECT i FROM t1;
+SHOW CREATE TABLE t10;
+Table Create Table
+t10 CREATE TABLE `t10` (
+ `i` datetime DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t10;
+i
+NULL
+CREATE TABLE t11 SELECT j FROM t1;
+SHOW CREATE TABLE t11;
+Table Create Table
+t11 CREATE TABLE `t11` (
+ `j` datetime DEFAULT '1986-09-27 03:00:00'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t11;
+j
+1986-09-27 03:00:00
+CREATE TABLE t12 (
+k TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+l TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+m TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+n TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+o TIMESTAMP NOT NULL DEFAULT '1986-09-27 03:00:00.098765',
+p TIMESTAMP NULL,
+q DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+r DATETIME DEFAULT CURRENT_TIMESTAMP,
+s DATETIME ON UPDATE CURRENT_TIMESTAMP,
+t DATETIME NULL,
+u DATETIME DEFAULT '1986-09-27 03:00:00.098765'
+)
+SELECT * FROM t1;
+SHOW CREATE TABLE t12;
+Table Create Table
+t12 CREATE TABLE `t12` (
+ `k` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `l` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `m` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `n` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+ `o` timestamp NOT NULL DEFAULT '1986-09-27 03:00:00',
+ `p` timestamp NULL DEFAULT NULL,
+ `q` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `r` datetime DEFAULT CURRENT_TIMESTAMP,
+ `s` datetime DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP,
+ `t` datetime DEFAULT NULL,
+ `u` datetime DEFAULT '1986-09-27 03:00:00',
+ `a` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
+ `b` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
+ `c` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
+ `d` timestamp NOT NULL DEFAULT '1986-09-27 03:00:00',
+ `e` timestamp NULL DEFAULT NULL,
+ `f` datetime DEFAULT NULL,
+ `g` datetime DEFAULT NULL,
+ `h` datetime DEFAULT NULL,
+ `i` datetime DEFAULT NULL,
+ `j` datetime DEFAULT '1986-09-27 03:00:00'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
+# 1970-04-11 20:13:57 UTC
+SET TIMESTAMP = 8712837.164953;
+CREATE TABLE t1 (
+a DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+b DATETIME DEFAULT CURRENT_TIMESTAMP,
+c DATETIME ON UPDATE CURRENT_TIMESTAMP,
+d DATETIME NULL,
+e DATETIME DEFAULT '1986-09-27 03:00:00.098765'
+);
+INSERT INTO t1 VALUES ();
+# 1971-01-31 20:13:57 UTC
+SET TIMESTAMP = 34200837.915736;
+CREATE TABLE t2 SELECT a FROM t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` datetime DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a
+1970-04-11 20:13:57
+CREATE TABLE t3 SELECT b FROM t1;
+SHOW CREATE TABLE t3;
+Table Create Table
+t3 CREATE TABLE `t3` (
+ `b` datetime DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t3;
+b
+1970-04-11 20:13:57
+CREATE TABLE t4 SELECT c FROM t1;
+SHOW CREATE TABLE t4;
+Table Create Table
+t4 CREATE TABLE `t4` (
+ `c` datetime DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t4;
+c
+NULL
+CREATE TABLE t5 SELECT d FROM t1;
+SHOW CREATE TABLE t5;
+Table Create Table
+t5 CREATE TABLE `t5` (
+ `d` datetime DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t5;
+d
+NULL
+CREATE TABLE t6 SELECT e FROM t1;
+SHOW CREATE TABLE t6;
+Table Create Table
+t6 CREATE TABLE `t6` (
+ `e` datetime DEFAULT '1986-09-27 03:00:00'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t6;
+e
+1986-09-27 03:00:00
+DROP TABLE t1, t2, t3, t4, t5, t6;
+#
+# Test of a CREATE TABLE SELECT that also declares columns. In this case
+# the function default should be deactivated during the execution of the
+# CREATE TABLE statement.
+#
+# 1970-01-01 00:16:40 UTC
+SET TIMESTAMP = 1000.987654;
+CREATE TABLE t1 ( a INT );
+INSERT INTO t1 VALUES ( 1 ), ( 2 );
+CREATE TABLE t2 ( b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP) SELECT a FROM t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `b` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `a` int(11) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SET TIMESTAMP = 2000.876543;
+INSERT INTO t2( a ) VALUES ( 3 );
+SELECT * FROM t2;
+b a
+0000-00-00 00:00:00 1
+0000-00-00 00:00:00 2
+1970-01-01 00:33:20 3
+DROP TABLE t1, t2;
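+#
+# Editor's note (sketch, not from the original commit): since the
+# function default is deactivated while CREATE TABLE ... SELECT copies
+# the rows of t1, the copied rows get the zero date in b; only the later
+#   INSERT INTO t2( a ) VALUES ( 3 );
+# picks up CURRENT_TIMESTAMP, as the result above shows.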
+#
+# Test of updating a view.
+#
+CREATE TABLE t1 ( a INT, b DATETIME DEFAULT CURRENT_TIMESTAMP );
+CREATE TABLE t2 ( a INT, b DATETIME ON UPDATE CURRENT_TIMESTAMP );
+CREATE VIEW v1 AS SELECT * FROM t1;
+SHOW CREATE VIEW v1;
+View Create View character_set_client collation_connection
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a`,`t1`.`b` AS `b` from `t1` latin1 latin1_swedish_ci
+CREATE VIEW v2 AS SELECT * FROM t2;
+SHOW CREATE VIEW v2;
+View Create View character_set_client collation_connection
+v2 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v2` AS select `t2`.`a` AS `a`,`t2`.`b` AS `b` from `t2` latin1 latin1_swedish_ci
+# 1971-01-31 20:13:57 UTC
+SET TIMESTAMP = 34200837.348564;
+INSERT INTO v1 ( a ) VALUES ( 1 );
+INSERT INTO v2 ( a ) VALUES ( 1 );
+SELECT * FROM t1;
+a b
+1 1971-01-31 20:13:57
+SELECT * FROM v1;
+a b
+1 1971-01-31 20:13:57
+SELECT * FROM t2;
+a b
+1 NULL
+SELECT * FROM v2;
+a b
+1 NULL
+# 1970-04-11 20:13:57 UTC
+SET TIMESTAMP = 8712837.567332;
+UPDATE v1 SET a = 2;
+UPDATE v2 SET a = 2;
+SELECT * FROM t1;
+a b
+2 1971-01-31 20:13:57
+SELECT * FROM v1;
+a b
+2 1971-01-31 20:13:57
+SELECT * FROM t2;
+a b
+2 1970-04-11 20:13:57
+SELECT * FROM v2;
+a b
+2 1970-04-11 20:13:57
+DROP VIEW v1, v2;
+DROP TABLE t1, t2;
+#
+# Test with stored procedures.
+#
+CREATE TABLE t1 (
+a INT,
+b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+c TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+d TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+e TIMESTAMP NULL,
+f DATETIME DEFAULT CURRENT_TIMESTAMP,
+g DATETIME ON UPDATE CURRENT_TIMESTAMP
+);
+CREATE PROCEDURE p1() INSERT INTO test.t1( a ) VALUES ( 1 );
+CREATE PROCEDURE p2() UPDATE t1 SET a = 2 WHERE a = 1;
+# 1971-01-31 20:13:57 UTC
+SET TIMESTAMP = 34200837.876544;
+CALL p1();
+SELECT * FROM t1;
+a b c d e f g
+1 1971-01-31 20:13:57 1971-01-31 20:13:57 0000-00-00 00:00:00 NULL 1971-01-31 20:13:57 NULL
+# 1970-04-11 20:13:57 UTC
+SET TIMESTAMP = 8712837.143546;
+CALL p2();
+SELECT * FROM t1;
+a b c d e f g
+2 1970-04-11 20:13:57 1971-01-31 20:13:57 1970-04-11 20:13:57 NULL 1971-01-31 20:13:57 1970-04-11 20:13:57
+DROP PROCEDURE p1;
+DROP PROCEDURE p2;
+DROP TABLE t1;
+#
+# Test with triggers.
+#
+CREATE TABLE t1 (
+a INT,
+b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+c TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+d TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+e TIMESTAMP NULL,
+f DATETIME,
+g DATETIME DEFAULT CURRENT_TIMESTAMP,
+h DATETIME ON UPDATE CURRENT_TIMESTAMP,
+i DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
+);
+CREATE TABLE t2 ( a INT );
+CREATE TRIGGER t2_trg BEFORE INSERT ON t2 FOR EACH ROW
+BEGIN
+INSERT INTO t1 ( a ) VALUES ( 1 );
+END|
+# 1971-01-31 20:13:57 UTC
+SET TIMESTAMP = 34200837.978675;
+INSERT INTO t2 ( a ) VALUES ( 1 );
+SELECT * FROM t1;
+a b c d e f g h i
+1 1971-01-31 20:13:57 1971-01-31 20:13:57 0000-00-00 00:00:00 NULL NULL 1971-01-31 20:13:57 NULL 1971-01-31 20:13:57
+DROP TRIGGER t2_trg;
+CREATE TRIGGER t2_trg BEFORE INSERT ON t2 FOR EACH ROW
+BEGIN
+UPDATE t1 SET a = 2;
+END|
+# 1970-04-11 20:13:57 UTC
+SET TIMESTAMP = 8712837.456789;
+INSERT INTO t2 ( a ) VALUES ( 1 );
+SELECT * FROM t1;
+a b c d e f g h i
+2 1970-04-11 20:13:57 1971-01-31 20:13:57 1970-04-11 20:13:57 NULL NULL 1971-01-31 20:13:57 1970-04-11 20:13:57 1970-04-11 20:13:57
+DROP TABLE t1, t2;
+#
+# Test where the assignment target is not a column.
+#
+CREATE TABLE t1 ( a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP );
+CREATE TABLE t2 ( a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP );
+CREATE TABLE t3 ( a TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP );
+CREATE TABLE t4 ( a TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP );
+CREATE VIEW v1 AS SELECT a COLLATE latin1_german1_ci AS b FROM t1;
+CREATE VIEW v2 ( b ) AS SELECT a COLLATE latin1_german1_ci FROM t2;
+CREATE VIEW v3 AS SELECT a COLLATE latin1_german1_ci AS b FROM t3;
+CREATE VIEW v4 ( b ) AS SELECT a COLLATE latin1_german1_ci FROM t4;
+INSERT INTO v1 ( b ) VALUES ( '2007-10-24 00:03:34.010203' );
+SELECT a FROM t1;
+a
+2007-10-24 00:03:34
+INSERT INTO v2 ( b ) VALUES ( '2007-10-24 00:03:34.010203' );
+SELECT a FROM t2;
+a
+2007-10-24 00:03:34
+INSERT INTO t3 VALUES ();
+UPDATE v3 SET b = '2007-10-24 00:03:34.010203';
+SELECT a FROM t3;
+a
+2007-10-24 00:03:34
+INSERT INTO t4 VALUES ();
+UPDATE v4 SET b = '2007-10-24 00:03:34.010203';
+SELECT a FROM t4;
+a
+2007-10-24 00:03:34
+DROP VIEW v1, v2, v3, v4;
+DROP TABLE t1, t2, t3, t4;
+#
+# Test of LOAD DATA/XML INFILE
+# This tests the behavior of function defaults for TIMESTAMP and DATETIME
+# columns during LOAD ... INFILE.
+# As can be seen here, a TIMESTAMP column with only ON UPDATE
+# CURRENT_TIMESTAMP will still have CURRENT_TIMESTAMP inserted on LOAD
+# ... INFILE if the value is missing. For DATETIME columns a NULL value
+# is inserted instead.
+#
+CREATE TABLE t1 (
+a INT,
+b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+c TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+d TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+e TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+f DATETIME,
+g DATETIME DEFAULT CURRENT_TIMESTAMP,
+h DATETIME ON UPDATE CURRENT_TIMESTAMP,
+i DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
+);
+CREATE TABLE t2 (
+a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+c TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+d TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+e DATETIME NOT NULL,
+f DATETIME NOT NULL DEFAULT '1977-01-02 12:13:14',
+g DATETIME DEFAULT CURRENT_TIMESTAMP NOT NULL,
+h DATETIME ON UPDATE CURRENT_TIMESTAMP NOT NULL,
+i DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL
+);
+SELECT 1 INTO OUTFILE 't3.dat' FROM dual;
+SELECT NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
+INTO OUTFILE 't4.dat'
+FROM dual;
+SELECT 1, 2 INTO OUTFILE 't5.dat' FROM dual;
+# Mon Aug 1 15:11:19 2011 UTC
+SET TIMESTAMP = 1312211479.918273;
+LOAD DATA INFILE 't3.dat' INTO TABLE t1;
+Warnings:
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+SELECT * FROM t1;
+a 1
+b 2011-08-01 15:11:19
+c 2011-08-01 15:11:19
+d 2011-08-01 15:11:19
+e 2011-08-01 15:11:19
+f NULL
+g NULL
+h NULL
+i NULL
+LOAD DATA INFILE 't4.dat' INTO TABLE t2;
+Warnings:
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'e' at row 1
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'f' at row 1
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'g' at row 1
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'h' at row 1
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'i' at row 1
+SELECT a FROM t2;
+a
+2011-08-01 15:11:19
+SELECT b FROM t2;
+b
+2011-08-01 15:11:19
+SELECT c FROM t2;
+c
+2011-08-01 15:11:19
+SELECT d FROM t2;
+d
+2011-08-01 15:11:19
+# As shown here, supplying a NULL value to a non-nullable
+# column with no default value results in the zero date.
+SELECT e FROM t2;
+e
+0000-00-00 00:00:00
+# As shown here, supplying a NULL value to a non-nullable column with a
+# default value results in the zero date.
+SELECT f FROM t2;
+f
+0000-00-00 00:00:00
+# As shown here, supplying a NULL value to a non-nullable column with a
+# default function results in the zero date.
+SELECT g FROM t2;
+g
+0000-00-00 00:00:00
+# As shown here, supplying a NULL value to a non-nullable DATETIME ON
+# UPDATE CURRENT_TIMESTAMP column with no default value results in the
+# zero date.
+SELECT h FROM t2;
+h
+0000-00-00 00:00:00
+SELECT i FROM t2;
+i
+0000-00-00 00:00:00
+DELETE FROM t1;
+DELETE FROM t2;
+# Read t3 file into t1
+# The syntax will cause a different code path to be taken
+# (read_fixed_length()) than under the LOAD ... INTO TABLE t1 command
+# above. The code in this path is copy-pasted from the path taken
+# under the syntax used in the previous LOAD command.
+LOAD DATA INFILE 't3.dat' INTO TABLE t1
+FIELDS TERMINATED BY '' ENCLOSED BY '';
+Warnings:
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+SELECT b FROM t1;
+b
+2011-08-01 15:11:19
+SELECT c FROM t1;
+c
+2011-08-01 15:11:19
+SELECT d FROM t1;
+d
+2011-08-01 15:11:19
+SELECT e FROM t1;
+e
+2011-08-01 15:11:19
+# Yes, a missing field cannot be NULL using this syntax, so it is set to
+# the zero date instead. A comment in read_fixed_length() says: "No fields
+# specified in fields_vars list can be NULL in this format."
+# It appears to be by design. This is inconsistent with the LOAD DATA
+# INFILE syntax in the previous test.
+SELECT f FROM t1;
+f
+0000-00-00 00:00:00
+SELECT g FROM t1;
+g
+0000-00-00 00:00:00
+# See the comment above "SELECT f FROM t1".
+SELECT h FROM t1;
+h
+0000-00-00 00:00:00
+SELECT i FROM t1;
+i
+0000-00-00 00:00:00
+DELETE FROM t1;
+LOAD DATA INFILE 't5.dat' INTO TABLE t1 ( a, @dummy );
+SELECT * FROM t1;
+a b c d e f g h i
+1 2011-08-01 15:11:19 2011-08-01 15:11:19 0000-00-00 00:00:00 2011-08-01 15:11:19 NULL 2011-08-01 15:11:19 NULL 2011-08-01 15:11:19
+SELECT @dummy;
+@dummy
+2
+DELETE FROM t1;
+LOAD DATA INFILE 't3.dat' INTO TABLE t1 ( a ) SET c = '2005-06-06 08:09:10';
+SELECT * FROM t1;
+a b c d e f g h i
+1 2011-08-01 15:11:19 2005-06-06 08:09:10 0000-00-00 00:00:00 2011-08-01 15:11:19 NULL 2011-08-01 15:11:19 NULL 2011-08-01 15:11:19
+DELETE FROM t1;
+LOAD DATA INFILE 't3.dat' INTO TABLE t1 ( a ) SET g = '2005-06-06 08:09:10';
+SELECT * FROM t1;
+a b c d e f g h i
+1 2011-08-01 15:11:19 2011-08-01 15:11:19 0000-00-00 00:00:00 2011-08-01 15:11:19 NULL 2005-06-06 08:09:10 NULL 2011-08-01 15:11:19
+DELETE FROM t1;
+# Load a static XML file
+LOAD XML INFILE '../../std_data/onerow.xml' INTO TABLE t1
+ROWS IDENTIFIED BY '<row>';
+Missing tags are treated as NULL
+SELECT * FROM t1;
+a 1
+b 2011-08-01 15:11:19
+c 2011-08-01 15:11:19
+d 2011-08-01 15:11:19
+e 2011-08-01 15:11:19
+f NULL
+g NULL
+h NULL
+i NULL
+DROP TABLE t1, t2;
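+#
+# Editor's summary (sketch, not from the original commit) of the rule
+# demonstrated above for fields missing from the loaded file:
+#   ts TIMESTAMP ... ON UPDATE CURRENT_TIMESTAMP  -- missing => NOW()
+#   dt DATETIME  ... ON UPDATE CURRENT_TIMESTAMP  -- missing => NULL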
+#
+# Similar LOAD DATA tests in another form
+#
+# All of this test portion has been run on a pre-WL5874 trunk
+# (except that like_b and like_c didn't exist there), and any result
+# differences are bugs.
+# Regarding like_b: its definition is the same as b's, except
+# that the constant default is replaced with a function
+# default. Our expectation is that like_b behaves
+# like b: if b is set to NULL, or set to 0000-00-00, or set to
+# its default, then the same should apply to like_b. The same
+# goes for like_c vs c.
+# Mon Aug 1 15:11:19 2011 UTC
+SET TIMESTAMP = 1312211479.089786;
+SELECT 1 INTO OUTFILE "file1.dat" FROM dual;
+SELECT NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
+INTO OUTFILE "file2.dat" FROM dual;
+# Too short row
+CREATE TABLE t1 (
+dummy INT,
+a DATETIME NULL DEFAULT NULL,
+b DATETIME NULL DEFAULT "2011-11-18",
+like_b DATETIME NULL DEFAULT CURRENT_TIMESTAMP,
+c DATETIME NOT NULL DEFAULT "2011-11-18",
+like_c DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
+d TIMESTAMP NULL DEFAULT "2011-05-03" ON UPDATE CURRENT_TIMESTAMP,
+e TIMESTAMP NOT NULL DEFAULT "2011-05-03",
+f TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+g TIMESTAMP NULL DEFAULT NULL,
+h INT NULL,
+i INT NOT NULL DEFAULT 42
+);
+# There is no promotion
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `dummy` int(11) DEFAULT NULL,
+ `a` datetime DEFAULT NULL,
+ `b` datetime DEFAULT '2011-11-18 00:00:00',
+ `like_b` datetime DEFAULT CURRENT_TIMESTAMP,
+ `c` datetime NOT NULL DEFAULT '2011-11-18 00:00:00',
+ `like_c` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `d` timestamp NULL DEFAULT '2011-05-03 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+ `e` timestamp NOT NULL DEFAULT '2011-05-03 00:00:00',
+ `f` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `g` timestamp NULL DEFAULT NULL,
+ `h` int(11) DEFAULT NULL,
+ `i` int(11) NOT NULL DEFAULT '42'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+LOAD DATA INFILE "file1.dat" INTO table t1;
+Warnings:
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+# It is strange that "like_b" gets NULL when "b" gets 0. But
+# this is consistent with how "a" gets NULL when "b" gets 0,
+# with how "g" gets NULL when "d" gets 0, and with how "h" gets
+# NULL when "i" gets 0. It looks like "DEFAULT
+# <non-NULL-constant>" is changed to 0, whereas DEFAULT NULL
+# and DEFAULT NOW are changed to NULL.
+SELECT * FROM t1;
+dummy 1
+a NULL
+b 0000-00-00 00:00:00
+like_b NULL
+c 0000-00-00 00:00:00
+like_c 0000-00-00 00:00:00
+d 0000-00-00 00:00:00
+e 2011-08-01 15:11:19
+f 2011-08-01 15:11:19
+g NULL
+h NULL
+i 0
+delete from t1;
+alter table t1
+modify f TIMESTAMP NULL default CURRENT_TIMESTAMP;
+# There is no promotion
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `dummy` int(11) DEFAULT NULL,
+ `a` datetime DEFAULT NULL,
+ `b` datetime DEFAULT '2011-11-18 00:00:00',
+ `like_b` datetime DEFAULT CURRENT_TIMESTAMP,
+ `c` datetime NOT NULL DEFAULT '2011-11-18 00:00:00',
+ `like_c` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `d` timestamp NULL DEFAULT '2011-05-03 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+ `e` timestamp NOT NULL DEFAULT '2011-05-03 00:00:00',
+ `f` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
+ `g` timestamp NULL DEFAULT NULL,
+ `h` int(11) DEFAULT NULL,
+ `i` int(11) NOT NULL DEFAULT '42'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+LOAD DATA INFILE "file1.dat" INTO table t1;
+Warnings:
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+SELECT * FROM t1;
+dummy 1
+a NULL
+b 0000-00-00 00:00:00
+like_b NULL
+c 0000-00-00 00:00:00
+like_c 0000-00-00 00:00:00
+d 0000-00-00 00:00:00
+e 2011-08-01 15:11:19
+f NULL
+g NULL
+h NULL
+i 0
+delete from t1;
+drop table t1;
+# Conclusion derived from trunk's results:
+# DATETIME DEFAULT <non-NULL-constant> (b,c) gets 0000-00-00,
+# DATETIME DEFAULT NULL (a) gets NULL,
+# TIMESTAMP NULL DEFAULT <non-NULL-constant> (d) gets 0000-00-00,
+# TIMESTAMP NULL DEFAULT NULL (g) gets NULL,
+# TIMESTAMP NULL DEFAULT NOW (f after ALTER) gets NULL,
+# TIMESTAMP NOT NULL (f before ALTER, e) gets NOW.
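+# A minimal sketch (hypothetical table, not part of the recorded run)
+# condensing the rule above for a too-short input row:
+#   CREATE TABLE probe (
+#     x DATETIME NULL DEFAULT '2011-11-18',          -- missing -> 0000-00-00
+#     y DATETIME NULL DEFAULT CURRENT_TIMESTAMP,     -- missing -> NULL
+#     z TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP -- missing -> NOW()
+#   );
+#   LOAD DATA INFILE 'file1.dat' INTO TABLE probe;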
+### Loading NULL ###
+CREATE TABLE t1 (
+dummy INT,
+a DATETIME NULL DEFAULT NULL,
+b DATETIME NULL DEFAULT "2011-11-18",
+like_b DATETIME NULL DEFAULT CURRENT_TIMESTAMP,
+c DATETIME NOT NULL DEFAULT "2011-11-18",
+like_c DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
+d TIMESTAMP NULL DEFAULT "2011-05-03" ON UPDATE CURRENT_TIMESTAMP,
+e TIMESTAMP NOT NULL DEFAULT "2011-05-03",
+f TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+g TIMESTAMP NULL DEFAULT NULL,
+h INT NULL,
+i INT NOT NULL DEFAULT 42
+);
+# There is no promotion
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `dummy` int(11) DEFAULT NULL,
+ `a` datetime DEFAULT NULL,
+ `b` datetime DEFAULT '2011-11-18 00:00:00',
+ `like_b` datetime DEFAULT CURRENT_TIMESTAMP,
+ `c` datetime NOT NULL DEFAULT '2011-11-18 00:00:00',
+ `like_c` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `d` timestamp NULL DEFAULT '2011-05-03 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+ `e` timestamp NOT NULL DEFAULT '2011-05-03 00:00:00',
+ `f` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `g` timestamp NULL DEFAULT NULL,
+ `h` int(11) DEFAULT NULL,
+ `i` int(11) NOT NULL DEFAULT '42'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+LOAD DATA INFILE "file2.dat" INTO table t1;
+Warnings:
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'c' at row 1
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'like_c' at row 1
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'i' at row 1
+SELECT * FROM t1;
+dummy NULL
+a NULL
+b NULL
+like_b NULL
+c 0000-00-00 00:00:00
+like_c 0000-00-00 00:00:00
+d NULL
+e 2011-08-01 15:11:19
+f 2011-08-01 15:11:19
+g NULL
+h NULL
+i 0
+delete from t1;
+alter table t1
+modify f TIMESTAMP NULL default CURRENT_TIMESTAMP;
+# There is no promotion
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `dummy` int(11) DEFAULT NULL,
+ `a` datetime DEFAULT NULL,
+ `b` datetime DEFAULT '2011-11-18 00:00:00',
+ `like_b` datetime DEFAULT CURRENT_TIMESTAMP,
+ `c` datetime NOT NULL DEFAULT '2011-11-18 00:00:00',
+ `like_c` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ `d` timestamp NULL DEFAULT '2011-05-03 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+ `e` timestamp NOT NULL DEFAULT '2011-05-03 00:00:00',
+ `f` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
+ `g` timestamp NULL DEFAULT NULL,
+ `h` int(11) DEFAULT NULL,
+ `i` int(11) NOT NULL DEFAULT '42'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+LOAD DATA INFILE "file2.dat" INTO table t1;
+Warnings:
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'c' at row 1
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'like_c' at row 1
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'i' at row 1
+SELECT * FROM t1;
+dummy NULL
+a NULL
+b NULL
+like_b NULL
+c 0000-00-00 00:00:00
+like_c 0000-00-00 00:00:00
+d NULL
+e 2011-08-01 15:11:19
+f NULL
+g NULL
+h NULL
+i 0
+delete from t1;
+# Conclusion derived from trunk's results:
+# DATETIME NULL (a,b) gets NULL,
+# DATETIME NOT NULL (c) gets 0000-00-00,
+# TIMESTAMP NULL (d,f,g) gets NULL,
+# TIMESTAMP NOT NULL (e) gets NOW.
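+# Sketch of the explicit-NULL rule (hypothetical table, not from the
+# recorded run): a loaded NULL is only overridden for NOT NULL columns.
+#   CREATE TABLE probe (
+#     c DATETIME NOT NULL DEFAULT '2011-11-18',  -- NULL in -> 0000-00-00
+#     e TIMESTAMP NOT NULL DEFAULT '2011-05-03', -- NULL in -> NOW()
+#     g TIMESTAMP NULL DEFAULT NULL              -- NULL in -> NULL
+#   );
+#   LOAD DATA INFILE 'file2.dat' INTO TABLE probe;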
+drop table t1;
+#
+# Test of updatable views with check options. The option can be violated
+# by ON UPDATE updates, which is very strange, as this opens a loophole
+# in this integrity check.
+#
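+# A minimal sketch of the mechanism (hypothetical names, not part of
+# the recorded run): an UPDATE that never mentions b still trips the
+# check once NOW() passes the bound, because ON UPDATE rewrites b.
+#   CREATE TABLE ct ( a INT, b TIMESTAMP NOT NULL
+#     DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP );
+#   CREATE VIEW cv AS SELECT * FROM ct
+#     WHERE b <= '2000-01-01 00:00:00' WITH CHECK OPTION;
+#   UPDATE cv SET a = a + 1;  -- ERROR: CHECK OPTION failed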
+SET TIME_ZONE = "+03:00";
+# 1970-01-01 03:16:40
+SET TIMESTAMP = 1000.123456;
+CREATE TABLE t1 ( a INT, b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP) ENGINE = INNODB;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+INSERT INTO t1 ( a ) VALUES ( 1 );
+SELECT * FROM t1;
+a b
+1 1970-01-01 03:16:40
+CREATE VIEW v1 AS SELECT * FROM t1 WHERE b <= '1970-01-01 03:16:40.123456'
+WITH CHECK OPTION;
+SELECT * FROM v1;
+a b
+1 1970-01-01 03:16:40
+# 1970-01-01 03:33:20
+SET TIMESTAMP = 2000.000234;
+UPDATE v1 SET a = 2;
+ERROR HY000: CHECK OPTION failed 'test.v1'
+SELECT * FROM t1;
+a b
+1 1970-01-01 03:16:40
+DROP VIEW v1;
+DROP TABLE t1;
+CREATE TABLE t1 (
+a TIMESTAMP NOT NULL DEFAULT '1973-08-14 09:11:22.089786' ON UPDATE CURRENT_TIMESTAMP,
+c INT KEY
+);
+# 1973-08-14 09:11:22 UTC
+SET TIMESTAMP = 114167482.534231;
+INSERT INTO t1 ( c ) VALUES ( 1 );
+CREATE VIEW v1 AS
+SELECT *
+FROM t1
+WHERE a >= '1973-08-14 09:11:22'
+WITH LOCAL CHECK OPTION;
+SELECT * FROM v1;
+a c
+1973-08-14 09:11:22 1
+SET TIMESTAMP = 1.126789;
+INSERT INTO v1 ( c ) VALUES ( 1 ) ON DUPLICATE KEY UPDATE c = 2;
+ERROR HY000: CHECK OPTION failed 'test.v1'
+SELECT * FROM v1;
+a c
+1973-08-14 09:11:22 1
+DROP VIEW v1;
+DROP TABLE t1;
+#
+# Bug 13095459 - MULTI-TABLE UPDATE MODIFIES A ROW TWICE
+#
+CREATE TABLE t1 (
+a INT,
+b INT,
+ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+PRIMARY KEY ( a, ts )
+) ENGINE = INNODB;
+INSERT INTO t1( a, b, ts ) VALUES ( 1, 0, '2000-09-28 17:44:34' );
+CREATE TABLE t2 ( a INT ) ENGINE = INNODB;
+INSERT INTO t2 VALUES ( 1 );
+UPDATE t1 STRAIGHT_JOIN t2
+SET t1.b = t1.b + 1
+WHERE t1.a = 1 AND t1.ts >= '2000-09-28 00:00:00';
+SELECT b FROM t1;
+b
+1
+DROP TABLE t1, t2;
+#
+# Bug#11745578: 17392: ALTER TABLE ADD COLUMN TIMESTAMP DEFAULT
+# CURRENT_TIMESTAMP INSERTS ZERO
+#
+SET timestamp = 1000;
+CREATE TABLE t1 ( b INT );
+INSERT INTO t1 VALUES (1);
+ALTER TABLE t1 ADD COLUMN a6 DATETIME DEFAULT NOW() ON UPDATE NOW() FIRST;
+ALTER TABLE t1 ADD COLUMN a5 DATETIME DEFAULT NOW() FIRST;
+ALTER TABLE t1 ADD COLUMN a4 DATETIME ON UPDATE NOW() FIRST;
+ALTER TABLE t1 ADD COLUMN a3 TIMESTAMP NOT NULL DEFAULT NOW() ON UPDATE NOW() FIRST;
+ALTER TABLE t1 ADD COLUMN a2 TIMESTAMP NOT NULL DEFAULT NOW() FIRST;
+ALTER TABLE t1 ADD COLUMN a1 TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE NOW() FIRST;
+ALTER TABLE t1 ADD COLUMN c1 TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE NOW() AFTER b;
+ALTER TABLE t1 ADD COLUMN c2 TIMESTAMP NOT NULL DEFAULT NOW() AFTER c1;
+ALTER TABLE t1 ADD COLUMN c3 TIMESTAMP NOT NULL DEFAULT NOW() ON UPDATE NOW() AFTER c2;
+ALTER TABLE t1 ADD COLUMN c4 DATETIME ON UPDATE NOW() AFTER c3;
+ALTER TABLE t1 ADD COLUMN c5 DATETIME DEFAULT NOW() AFTER c4;
+ALTER TABLE t1 ADD COLUMN c6 DATETIME DEFAULT NOW() ON UPDATE NOW() AFTER c5;
+SELECT * FROM t1;
+a1 a2 a3 a4 a5 a6 b c1 c2 c3 c4 c5 c6
+0000-00-00 00:00:00 1970-01-01 03:16:40 1970-01-01 03:16:40 NULL 1970-01-01 03:16:40 1970-01-01 03:16:40 1 0000-00-00 00:00:00 1970-01-01 03:16:40 1970-01-01 03:16:40 NULL 1970-01-01 03:16:40 1970-01-01 03:16:40
+DROP TABLE t1;
+CREATE TABLE t1 ( a TIMESTAMP NOT NULL DEFAULT NOW() ON UPDATE CURRENT_TIMESTAMP, b DATETIME DEFAULT NOW() );
+INSERT INTO t1 VALUES ();
+SET timestamp = 1000000000;
+ALTER TABLE t1 MODIFY COLUMN a TIMESTAMP(3) DEFAULT CURRENT_TIMESTAMP(3);
+ALTER TABLE t1 MODIFY COLUMN b DATETIME(3) DEFAULT CURRENT_TIMESTAMP(3);
+SELECT * FROM t1;
+a b
+1970-01-01 03:16:40.000 1970-01-01 03:16:40.000
+DROP TABLE t1;
+CREATE TABLE t1 (
+a TIMESTAMP NOT NULL DEFAULT '1999-12-01 11:22:33' ON UPDATE CURRENT_TIMESTAMP,
+b DATETIME DEFAULT '1999-12-01 11:22:33'
+);
+INSERT INTO t1 VALUES ();
+ALTER TABLE t1 MODIFY COLUMN a TIMESTAMP DEFAULT NOW();
+ALTER TABLE t1 MODIFY COLUMN b DATETIME DEFAULT NOW();
+INSERT INTO t1 VALUES ();
+SELECT * FROM t1;
+a b
+1999-12-01 11:22:33 1999-12-01 11:22:33
+2001-09-09 04:46:40 2001-09-09 04:46:40
+DROP TABLE t1;
+#
+# Function defaults run 2. Six digits scale on seconds precision.
+#
+SET TIME_ZONE = "+00:00";
+#
+# Test of errors for column data types that don't support function
+# defaults.
+#
+CREATE TABLE t1( a BIT DEFAULT CURRENT_TIMESTAMP(6) );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a TINYINT DEFAULT CURRENT_TIMESTAMP(6) );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a SMALLINT DEFAULT CURRENT_TIMESTAMP(6) );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a MEDIUMINT DEFAULT CURRENT_TIMESTAMP(6) );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a INT DEFAULT CURRENT_TIMESTAMP(6) );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a BIGINT DEFAULT CURRENT_TIMESTAMP(6) );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a FLOAT DEFAULT CURRENT_TIMESTAMP(6) );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a DECIMAL DEFAULT CURRENT_TIMESTAMP(6) );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a DATE DEFAULT CURRENT_TIMESTAMP(6) );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a TIME DEFAULT CURRENT_TIMESTAMP(6) );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a YEAR DEFAULT CURRENT_TIMESTAMP(6) );
+ERROR 42000: Invalid default value for 'a'
+CREATE TABLE t1( a BIT ON UPDATE CURRENT_TIMESTAMP(6) );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a TINYINT ON UPDATE CURRENT_TIMESTAMP(6) );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a SMALLINT ON UPDATE CURRENT_TIMESTAMP(6) );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a MEDIUMINT ON UPDATE CURRENT_TIMESTAMP(6) );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a INT ON UPDATE CURRENT_TIMESTAMP(6) );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a BIGINT ON UPDATE CURRENT_TIMESTAMP(6) );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a FLOAT ON UPDATE CURRENT_TIMESTAMP(6) );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a DECIMAL ON UPDATE CURRENT_TIMESTAMP(6) );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a DATE ON UPDATE CURRENT_TIMESTAMP(6) );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a TIME ON UPDATE CURRENT_TIMESTAMP(6) );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+CREATE TABLE t1( a YEAR ON UPDATE CURRENT_TIMESTAMP(6) );
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+#
+# Test that the default clause behaves like NOW() regarding time zones.
+#
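+# Sketch (hypothetical table, not part of the recorded run): the
+# default stores the same UTC instant as NOW(); only the display
+# follows @@TIME_ZONE.
+#   CREATE TABLE tz_t ( ts TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) );
+#   SET TIME_ZONE = '+05:00';
+#   INSERT INTO tz_t VALUES ();
+#   SET TIME_ZONE = '+00:00';
+#   SELECT ts FROM tz_t;  -- the same row now displays 5 hours earlier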
+CREATE TABLE t1 (
+a TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+c TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+d TIMESTAMP(6) NULL,
+e DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+f DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6),
+g DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6),
+h DATETIME(6)
+);
+# 2011-09-27 14:11:08 UTC
+SET TIMESTAMP = 1317132668.654321;
+SET @old_time_zone = @@TIME_ZONE;
+SET TIME_ZONE = "+05:00";
+INSERT INTO t1( d, h ) VALUES ( NOW(6), NOW(6) );
+SELECT * FROM t1;
+a b c d e f g h
+2011-09-27 19:11:08.654321 2011-09-27 19:11:08.654321 0000-00-00 00:00:00.000000 2011-09-27 19:11:08.654321 2011-09-27 19:11:08.654321 2011-09-27 19:11:08.654321 NULL 2011-09-27 19:11:08.654321
+# 1989-05-12 23:02:03 UTC
+SET TIMESTAMP = 611017323.543212;
+UPDATE t1 SET d = NOW(6), h = NOW(6);
+SELECT * FROM t1;
+a b c d e f g h
+1989-05-13 04:02:03.543212 2011-09-27 19:11:08.654321 1989-05-13 04:02:03.543212 1989-05-13 04:02:03.543212 1989-05-13 04:02:03.543212 2011-09-27 19:11:08.654321 1989-05-13 04:02:03.543212 1989-05-13 04:02:03.543212
+SET TIME_ZONE = @old_time_zone;
+DROP TABLE t1;
+#
+# Test of several TIMESTAMP columns with different function defaults.
+#
+CREATE TABLE t1 (
+a TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+c TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+d TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+e TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+f INT
+);
+# 2011-04-19 07:22:02 UTC
+SET TIMESTAMP = 1303197722.534231;
+INSERT INTO t1 ( f ) VALUES (1);
+SELECT * FROM t1;
+a b c d e f
+2011-04-19 07:22:02.534231 2011-04-19 07:22:02.534231 2011-04-19 07:22:02.534231 0000-00-00 00:00:00.000000 0000-00-00 00:00:00.000000 1
+# 2011-04-19 07:23:18 UTC
+SET TIMESTAMP = 1303197798.132435;
+UPDATE t1 SET f = 2;
+SELECT * FROM t1;
+a b c d e f
+2011-04-19 07:23:18.132435 2011-04-19 07:23:18.132435 2011-04-19 07:22:02.534231 2011-04-19 07:23:18.132435 2011-04-19 07:23:18.132435 2
+DROP TABLE t1;
+#
+# Test of inserted values out of order.
+#
+CREATE TABLE t1 (
+a INT,
+b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+c TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+d TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+e TIMESTAMP(6) NULL,
+f DATETIME(6),
+g DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6),
+h DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6),
+i DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+j INT
+);
+# 2011-04-19 07:22:02 UTC
+SET TIMESTAMP = 1303197722.534231;
+INSERT INTO t1 ( j, a ) VALUES ( 1, 1 );
+SELECT * FROM t1;
+a b c d e f g h i j
+1 2011-04-19 07:22:02.534231 2011-04-19 07:22:02.534231 0000-00-00 00:00:00.000000 NULL NULL 2011-04-19 07:22:02.534231 NULL 2011-04-19 07:22:02.534231 1
+DROP TABLE t1;
+#
+# Test of ON DUPLICATE KEY UPDATE
+#
+CREATE TABLE t1 (
+a INT PRIMARY KEY,
+b INT,
+c TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+d TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+e TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+f TIMESTAMP(6) NOT NULL DEFAULT '1986-09-27 03:00:00.098765',
+g TIMESTAMP(6) NULL,
+h DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+i DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6),
+j DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6),
+k DATETIME(6) NULL,
+l DATETIME(6) DEFAULT '1986-09-27 03:00:00.098765'
+);
+# 1977-12-21 23:00:00 UTC
+SET TIMESTAMP = 251593200.192837;
+INSERT INTO t1(a) VALUES (1) ON DUPLICATE KEY UPDATE b = 2;
+SELECT * FROM t1;
+a b c d e f g h i j k l
+1 NULL 1977-12-21 23:00:00.192837 1977-12-21 23:00:00.192837 0000-00-00 00:00:00.000000 1986-09-27 03:00:00.098765 NULL 1977-12-21 23:00:00.192837 1977-12-21 23:00:00.192837 NULL NULL 1986-09-27 03:00:00.098765
+# 1975-05-21 23:00:00 UTC
+SET TIMESTAMP = 169945200.918273;
+INSERT INTO t1(a) VALUES (1) ON DUPLICATE KEY UPDATE b = 2;
+SELECT * FROM t1;
+a b c d e f g h i j k l
+1 2 1975-05-21 23:00:00.918273 1977-12-21 23:00:00.192837 1975-05-21 23:00:00.918273 1986-09-27 03:00:00.098765 NULL 1975-05-21 23:00:00.918273 1977-12-21 23:00:00.192837 1975-05-21 23:00:00.918273 NULL 1986-09-27 03:00:00.098765
+# 1973-08-14 09:11:22 UTC
+SET TIMESTAMP = 114167482.534231;
+INSERT INTO t1(a) VALUES (2) ON DUPLICATE KEY UPDATE b = 2;
+SELECT * FROM t1;
+a b c d e f g h i j k l
+1 2 1975-05-21 23:00:00.918273 1977-12-21 23:00:00.192837 1975-05-21 23:00:00.918273 1986-09-27 03:00:00.098765 NULL 1975-05-21 23:00:00.918273 1977-12-21 23:00:00.192837 1975-05-21 23:00:00.918273 NULL 1986-09-27 03:00:00.098765
+2 NULL 1973-08-14 09:11:22.534231 1973-08-14 09:11:22.534231 0000-00-00 00:00:00.000000 1986-09-27 03:00:00.098765 NULL 1973-08-14 09:11:22.534231 1973-08-14 09:11:22.534231 NULL NULL 1986-09-27 03:00:00.098765
+DROP TABLE t1;
+CREATE TABLE t1 ( a INT PRIMARY KEY, b INT, c TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6) );
+# 2011-04-19 07:23:18 UTC
+SET TIMESTAMP = 1303197798.945156;
+INSERT INTO t1 VALUES
+(1, 0, '2001-01-01 01:01:01.111111'),
+(2, 0, '2002-02-02 02:02:02.222222'),
+(3, 0, '2003-03-03 03:03:03.333333');
+SELECT * FROM t1;
+a b c
+1 0 2001-01-01 01:01:01.111111
+2 0 2002-02-02 02:02:02.222222
+3 0 2003-03-03 03:03:03.333333
+UPDATE t1 SET b = 2, c = c WHERE a = 2;
+SELECT * FROM t1;
+a b c
+1 0 2001-01-01 01:01:01.111111
+2 2 2002-02-02 02:02:02.222222
+3 0 2003-03-03 03:03:03.333333
+INSERT INTO t1 (a) VALUES (4);
+SELECT * FROM t1;
+a b c
+1 0 2001-01-01 01:01:01.111111
+2 2 2002-02-02 02:02:02.222222
+3 0 2003-03-03 03:03:03.333333
+4 NULL 2011-04-19 07:23:18.945156
+UPDATE t1 SET c = '2004-04-04 04:04:04.444444' WHERE a = 4;
+SELECT * FROM t1;
+a b c
+1 0 2001-01-01 01:01:01.111111
+2 2 2002-02-02 02:02:02.222222
+3 0 2003-03-03 03:03:03.333333
+4 NULL 2004-04-04 04:04:04.444444
+INSERT INTO t1 ( a ) VALUES ( 3 ), ( 5 ) ON DUPLICATE KEY UPDATE b = 3, c = c;
+SELECT * FROM t1;
+a b c
+1 0 2001-01-01 01:01:01.111111
+2 2 2002-02-02 02:02:02.222222
+3 3 2003-03-03 03:03:03.333333
+4 NULL 2004-04-04 04:04:04.444444
+5 NULL 2011-04-19 07:23:18.945156
+INSERT INTO t1 (a, c) VALUES
+(4, '2004-04-04 00:00:00.444444'),
+(6, '2006-06-06 06:06:06.666666')
+ON DUPLICATE KEY UPDATE b = 4;
+SELECT * FROM t1;
+a b c
+1 0 2001-01-01 01:01:01.111111
+2 2 2002-02-02 02:02:02.222222
+3 3 2003-03-03 03:03:03.333333
+4 4 2011-04-19 07:23:18.945156
+5 NULL 2011-04-19 07:23:18.945156
+6 NULL 2006-06-06 06:06:06.666666
+DROP TABLE t1;
+#
+# Test of REPLACE INTO executed as UPDATE.
+#
+CREATE TABLE t1 (
+a INT PRIMARY KEY,
+b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+c DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+d TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+e DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6),
+f TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+g DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6),
+h TIMESTAMP(6) NULL,
+i DATETIME(6)
+);
+# 1970-09-21 09:11:12 UTC
+SET TIMESTAMP = 22756272.163584;
+REPLACE INTO t1 ( a ) VALUES ( 1 );
+SELECT * FROM t1;
+a b c d e f g h i
+1 1970-09-21 09:11:12.163584 1970-09-21 09:11:12.163584 1970-09-21 09:11:12.163584 1970-09-21 09:11:12.163584 0000-00-00 00:00:00.000000 NULL NULL NULL
+# 1970-11-10 14:16:17 UTC
+SET TIMESTAMP = 27094577.852954;
+REPLACE INTO t1 ( a ) VALUES ( 1 );
+SELECT * FROM t1;
+a b c d e f g h i
+1 1970-11-10 14:16:17.852954 1970-11-10 14:16:17.852954 1970-11-10 14:16:17.852954 1970-11-10 14:16:17.852954 0000-00-00 00:00:00.000000 NULL NULL NULL
+DROP TABLE t1;
+#
+# Test of insertion of NULL, DEFAULT and an empty row for DEFAULT
+# CURRENT_TIMESTAMP.
+#
+CREATE TABLE t1 (
+a TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+b DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6),
+c INT
+);
+# 2011-04-20 09:53:41 UTC
+SET TIMESTAMP = 1303293221.163578;
+INSERT INTO t1 VALUES (NULL, NULL, 1), (DEFAULT, DEFAULT, 2);
+INSERT INTO t1 ( a, b, c ) VALUES (NULL, NULL, 3), (DEFAULT, DEFAULT, 4);
+SELECT * FROM t1;
+a b c
+2011-04-20 09:53:41.163578 NULL 1
+2011-04-20 09:53:41.163578 2011-04-20 09:53:41.163578 2
+2011-04-20 09:53:41.163578 NULL 3
+2011-04-20 09:53:41.163578 2011-04-20 09:53:41.163578 4
+SET TIME_ZONE = "+03:00";
+SELECT * FROM t1;
+a b c
+2011-04-20 12:53:41.163578 NULL 1
+2011-04-20 12:53:41.163578 2011-04-20 09:53:41.163578 2
+2011-04-20 12:53:41.163578 NULL 3
+2011-04-20 12:53:41.163578 2011-04-20 09:53:41.163578 4
+SET TIME_ZONE = "+00:00";
+DROP TABLE t1;
+# 2011-04-20 07:05:39 UTC
+SET TIMESTAMP = 1303283139.195624;
+CREATE TABLE t1 (
+a TIMESTAMP(6) NOT NULL DEFAULT '2010-10-11 12:34:56' ON UPDATE CURRENT_TIMESTAMP(6),
+b DATETIME(6) DEFAULT '2010-10-11 12:34:56'
+);
+INSERT INTO t1 VALUES (NULL, NULL), (DEFAULT, DEFAULT);
+INSERT INTO t1 ( a, b ) VALUES (NULL, NULL), (DEFAULT, DEFAULT);
+SELECT * FROM t1;
+a b
+2011-04-20 07:05:39.195624 NULL
+2010-10-11 12:34:56.000000 2010-10-11 12:34:56.000000
+2011-04-20 07:05:39.195624 NULL
+2010-10-11 12:34:56.000000 2010-10-11 12:34:56.000000
+DROP TABLE t1;
+# 2011-04-20 09:53:41 UTC
+SET TIMESTAMP = 1303293221.136952;
+CREATE TABLE t1 (
+a TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+c TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+d TIMESTAMP(6) NOT NULL DEFAULT '1986-09-27 03:00:00.098765',
+e TIMESTAMP(6) NULL,
+f DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+g DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6),
+h DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6),
+i DATETIME(6) NULL,
+j DATETIME(6) DEFAULT '1986-09-27 03:00:00.098765'
+);
+INSERT INTO t1 VALUES ();
+INSERT INTO t1 SELECT NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL;
+SELECT * FROM t1;
+a b c d e f g h i j
+2011-04-20 09:53:41.136952 2011-04-20 09:53:41.136952 0000-00-00 00:00:00.000000 1986-09-27 03:00:00.098765 NULL 2011-04-20 09:53:41.136952 2011-04-20 09:53:41.136952 NULL NULL 1986-09-27 03:00:00.098765
+2011-04-20 09:53:41.136952 2011-04-20 09:53:41.136952 2011-04-20 09:53:41.136952 2011-04-20 09:53:41.136952 NULL NULL NULL NULL NULL NULL
+DROP TABLE t1;
+#
+# Test of multiple-table UPDATE for DEFAULT CURRENT_TIMESTAMP
+#
+CREATE TABLE t1 (
+a TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+b DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6),
+c INT
+);
+INSERT INTO t1 ( c ) VALUES (1);
+SELECT * FROM t1;
+a b c
+2011-04-20 09:53:41.136952 2011-04-20 09:53:41.136952 1
+# 2011-04-20 15:06:13 UTC
+SET TIMESTAMP = 1303311973.163587;
+UPDATE t1 t11, t1 t12 SET t11.c = 1;
+SELECT * FROM t1;
+a b c
+2011-04-20 09:53:41.136952 2011-04-20 09:53:41.136952 1
+UPDATE t1 t11, t1 t12 SET t11.c = 2;
+SELECT * FROM t1;
+a b c
+2011-04-20 15:06:13.163587 2011-04-20 09:53:41.136952 2
+DROP TABLE t1;
+CREATE TABLE t1 (
+a TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+b TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+c DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6),
+d DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6),
+e INT
+);
+CREATE TABLE t2 (
+f INT,
+g DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6),
+h DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6),
+i TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+j TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6)
+);
+# 1995-03-11 00:02:03 UTC
+SET TIMESTAMP = 794880123.195676;
+INSERT INTO t1 ( e ) VALUES ( 1 ), ( 2 );
+INSERT INTO t2 ( f ) VALUES ( 1 ), ( 2 );
+SELECT * FROM t1;
+a b c d e
+1995-03-11 00:02:03.195676 0000-00-00 00:00:00.000000 1995-03-11 00:02:03.195676 NULL 1
+1995-03-11 00:02:03.195676 0000-00-00 00:00:00.000000 1995-03-11 00:02:03.195676 NULL 2
+SELECT * FROM t2;
+f g h i j
+1 NULL 1995-03-11 00:02:03.195676 0000-00-00 00:00:00.000000 1995-03-11 00:02:03.195676
+2 NULL 1995-03-11 00:02:03.195676 0000-00-00 00:00:00.000000 1995-03-11 00:02:03.195676
+# 1980-12-13 02:02:01 UTC
+SET TIMESTAMP = 345520921.196755;
+UPDATE t1, t2 SET t1.e = 3, t2.f = 4;
+SELECT * FROM t1;
+a b c d e
+1995-03-11 00:02:03.195676 1980-12-13 02:02:01.196755 1995-03-11 00:02:03.195676 1980-12-13 02:02:01.196755 3
+1995-03-11 00:02:03.195676 1980-12-13 02:02:01.196755 1995-03-11 00:02:03.195676 1980-12-13 02:02:01.196755 3
+SELECT * FROM t2;
+f g h i j
+4 1980-12-13 02:02:01.196755 1995-03-11 00:02:03.195676 1980-12-13 02:02:01.196755 1995-03-11 00:02:03.195676
+4 1980-12-13 02:02:01.196755 1995-03-11 00:02:03.195676 1980-12-13 02:02:01.196755 1995-03-11 00:02:03.195676
+DROP TABLE t1, t2;
+#
+# Test of multiple table update with temporary table and on the fly.
+#
+CREATE TABLE t1 (
+a TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+b DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6),
+c INT,
+d INT
+);
+CREATE TABLE t2 (
+a TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+b DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6),
+c INT KEY,
+d INT
+);
+INSERT INTO t1 ( c ) VALUES (1), (2);
+INSERT INTO t2 ( c ) VALUES (1), (2);
+# Test of multiple table update done on the fly
+# 2011-04-20 15:06:13 UTC
+SET TIMESTAMP = 1303311973.194685;
+UPDATE t1 JOIN t2 USING ( c ) SET t2.d = 1;
+SELECT * FROM t1;
+a b c d
+0000-00-00 00:00:00.000000 NULL 1 NULL
+0000-00-00 00:00:00.000000 NULL 2 NULL
+SELECT * FROM t2;
+a b c d
+2011-04-20 15:06:13.194685 2011-04-20 15:06:13.194685 1 1
+2011-04-20 15:06:13.194685 2011-04-20 15:06:13.194685 2 1
+# Test of multiple table update done with temporary table.
+# 1979-01-15 02:02:01 UTC
+SET TIMESTAMP = 285213721.134679;
+UPDATE t1 JOIN t2 USING ( c ) SET t1.d = 1;
+SELECT * FROM t1;
+a b c d
+1979-01-15 02:02:01.134679 1979-01-15 02:02:01.134679 1 1
+1979-01-15 02:02:01.134679 1979-01-15 02:02:01.134679 2 1
+SELECT * FROM t2;
+a b c d
+2011-04-20 15:06:13.194685 2011-04-20 15:06:13.194685 1 1
+2011-04-20 15:06:13.194685 2011-04-20 15:06:13.194685 2 1
+DROP TABLE t1, t2;
+#
+# Test of ON UPDATE CURRENT_TIMESTAMP.
+#
+CREATE TABLE t1 (
+a TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+b DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6),
+c INT
+);
+# 2011-04-20 09:53:41 UTC
+SET TIMESTAMP = 1303293221.794613;
+INSERT INTO t1 ( c ) VALUES ( 1 );
+SELECT * FROM t1;
+a b c
+0000-00-00 00:00:00.000000 NULL 1
+UPDATE t1 SET c = 1;
+SELECT * FROM t1;
+a b c
+0000-00-00 00:00:00.000000 NULL 1
+UPDATE t1 SET c = 2;
+SELECT * FROM t1;
+a b c
+2011-04-20 09:53:41.794613 2011-04-20 09:53:41.794613 2
+#
+# Test of multiple-table UPDATE for ON UPDATE CURRENT_TIMESTAMP
+#
+# 2011-04-20 15:06:13 UTC
+SET TIMESTAMP = 1303311973.534231;
+UPDATE t1 t11, t1 t12 SET t11.c = 2;
+SELECT * FROM t1;
+a b c
+2011-04-20 09:53:41.794613 2011-04-20 09:53:41.794613 2
+UPDATE t1 t11, t1 t12 SET t11.c = 3;
+SELECT * FROM t1;
+a b c
+2011-04-20 15:06:13.534231 2011-04-20 15:06:13.534231 3
+DROP TABLE t1;
+#
+# Test of a multiple-table update where only one table is updated and
+# the updated table has a primary key.
+#
+CREATE TABLE t1 ( a INT, b INT, PRIMARY KEY (a) );
+INSERT INTO t1 VALUES (1, 1),(2, 2),(3, 3),(4, 4);
+CREATE TABLE t2 ( a INT, b INT );
+INSERT INTO t2 VALUES (1, 1),(2, 2),(3, 3),(4, 4),(5, 5);
+UPDATE t1, t2 SET t1.b = 100 WHERE t1.a = t2.a;
+SELECT * FROM t1;
+a b
+1 100
+2 100
+3 100
+4 100
+SELECT * FROM t2;
+a b
+1 1
+2 2
+3 3
+4 4
+5 5
+DROP TABLE t1, t2;
+#
+# Test of ALTER TABLE, reordering columns.
+#
+CREATE TABLE t1 ( a TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), b INT );
+ALTER TABLE t1 MODIFY a TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6) AFTER b;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `b` int(11) DEFAULT NULL,
+ `a` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 ( a INT, b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), c TIMESTAMP(6) NULL );
+ALTER TABLE t1 MODIFY b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6) FIRST;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `b` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+ `a` int(11) DEFAULT NULL,
+ `c` timestamp(6) NULL DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 ( a INT, b TIMESTAMP(6) NULL );
+ALTER TABLE t1 MODIFY b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6) FIRST;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `b` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+ `a` int(11) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 ( a TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), b TIMESTAMP(6) NULL );
+ALTER TABLE t1 MODIFY a TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' AFTER b;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `b` timestamp(6) NULL DEFAULT NULL,
+ `a` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 ( a TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE NOW(6), b INT, c TIMESTAMP(6) NULL );
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000' ON UPDATE CURRENT_TIMESTAMP(6),
+ `b` int(11) DEFAULT NULL,
+ `c` timestamp(6) NULL DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+ALTER TABLE t1 MODIFY a TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6) AFTER b;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `b` int(11) DEFAULT NULL,
+ `a` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+ `c` timestamp(6) NULL DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 ( a TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE NOW(6), b INT, c TIMESTAMP(6) NULL );
+ALTER TABLE t1 MODIFY c TIMESTAMP(6) NULL FIRST;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` timestamp(6) NULL DEFAULT NULL,
+ `a` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000' ON UPDATE CURRENT_TIMESTAMP(6),
+ `b` int(11) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 ( a TIMESTAMP(6) NOT NULL DEFAULT NOW(6) ON UPDATE CURRENT_TIMESTAMP(6), b INT, c TIMESTAMP(6) NULL );
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+ `b` int(11) DEFAULT NULL,
+ `c` timestamp(6) NULL DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+ALTER TABLE t1 MODIFY a TIMESTAMP(6) NOT NULL DEFAULT NOW(6) ON UPDATE CURRENT_TIMESTAMP(6) AFTER b;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `b` int(11) DEFAULT NULL,
+ `a` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+ `c` timestamp(6) NULL DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 ( a TIMESTAMP(6) NOT NULL DEFAULT NOW(6) ON UPDATE CURRENT_TIMESTAMP(6), b INT, c TIMESTAMP(6) NULL );
+ALTER TABLE t1 MODIFY c TIMESTAMP(6) NULL FIRST;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` timestamp(6) NULL DEFAULT NULL,
+ `a` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+ `b` int(11) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+#
+# Test of ALTER TABLE, adding columns.
+#
+CREATE TABLE t1 ( a INT );
+ALTER TABLE t1 ADD COLUMN b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+#
+# Test of INSERT SELECT.
+#
+CREATE TABLE t1 (
+a TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+c DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+d DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)
+);
+CREATE TABLE t2 (
+placeholder1 INT,
+placeholder2 INT,
+placeholder3 INT,
+placeholder4 INT,
+a TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+b TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00',
+c DATETIME(6),
+d DATETIME(6)
+);
+# 1977-08-16 15:30:01 UTC
+SET TIMESTAMP = 240589801.654312;
+INSERT INTO t2 (a, b, c, d) VALUES (
+'1977-08-16 15:30:01.123456',
+'1977-08-16 15:30:01.234567',
+'1977-08-16 15:30:01.345678',
+'1977-08-16 15:30:01.456789'
+);
+# 1986-09-27 01:00:00 UTC
+SET TIMESTAMP = 528166800.132435;
+INSERT INTO t1 ( a, c ) SELECT a, c FROM t2;
+SELECT * FROM t1;
+a b c d
+1977-08-16 15:30:01.123456 1986-09-27 01:00:00.132435 1977-08-16 15:30:01.345678 1986-09-27 01:00:00.132435
+DROP TABLE t1, t2;
+#
+# Test of CREATE TABLE SELECT.
+#
+# We test that the columns of the source table are not used to determine
+# function defaults for the receiving table.
+#
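+# Sketch (hypothetical names): the receiving column keeps only the data
+# type; DEFAULT/ON UPDATE attributes are not copied from the source.
+#   CREATE TABLE src ( a TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6)
+#                      ON UPDATE CURRENT_TIMESTAMP(6) );
+#   CREATE TABLE dst SELECT a FROM src;
+#   SHOW CREATE TABLE dst;  -- a: NOT NULL with a constant zero-date
+#                           -- default, no NOW()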
+# 1970-04-11 20:13:57 UTC
+SET TIMESTAMP = 8712837.657898;
+CREATE TABLE t1 (
+a TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+c TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+d TIMESTAMP(6) NOT NULL DEFAULT '1986-09-27 03:00:00.098765',
+e TIMESTAMP(6) NULL,
+f DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+g DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6),
+h DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6),
+i DATETIME(6) NULL,
+j DATETIME(6) DEFAULT '1986-09-27 03:00:00.098765'
+);
+INSERT INTO t1 VALUES ();
+# 1971-01-31 20:13:57 UTC
+SET TIMESTAMP = 34200837.164937;
+CREATE TABLE t2 SELECT a FROM t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a
+1970-04-11 20:13:57.657897
+CREATE TABLE t3 SELECT b FROM t1;
+SHOW CREATE TABLE t3;
+Table Create Table
+t3 CREATE TABLE `t3` (
+ `b` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t3;
+b
+1970-04-11 20:13:57.657897
+CREATE TABLE t4 SELECT c FROM t1;
+SHOW CREATE TABLE t4;
+Table Create Table
+t4 CREATE TABLE `t4` (
+ `c` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t4;
+c
+0000-00-00 00:00:00.000000
+CREATE TABLE t5 SELECT d FROM t1;
+SHOW CREATE TABLE t5;
+Table Create Table
+t5 CREATE TABLE `t5` (
+ `d` timestamp(6) NOT NULL DEFAULT '1986-09-27 03:00:00.098765'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t5;
+d
+1986-09-27 03:00:00.098765
+CREATE TABLE t6 SELECT e FROM t1;
+SHOW CREATE TABLE t6;
+Table Create Table
+t6 CREATE TABLE `t6` (
+ `e` timestamp(6) NULL DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t6;
+e
+NULL
+CREATE TABLE t7 SELECT f FROM t1;
+SHOW CREATE TABLE t7;
+Table Create Table
+t7 CREATE TABLE `t7` (
+ `f` datetime(6) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t7;
+f
+1970-04-11 20:13:57.657897
+CREATE TABLE t8 SELECT g FROM t1;
+SHOW CREATE TABLE t8;
+Table Create Table
+t8 CREATE TABLE `t8` (
+ `g` datetime(6) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t8;
+g
+1970-04-11 20:13:57.657897
+CREATE TABLE t9 SELECT h FROM t1;
+SHOW CREATE TABLE t9;
+Table Create Table
+t9 CREATE TABLE `t9` (
+ `h` datetime(6) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t9;
+h
+NULL
+CREATE TABLE t10 SELECT i FROM t1;
+SHOW CREATE TABLE t10;
+Table Create Table
+t10 CREATE TABLE `t10` (
+ `i` datetime(6) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t10;
+i
+NULL
+CREATE TABLE t11 SELECT j FROM t1;
+SHOW CREATE TABLE t11;
+Table Create Table
+t11 CREATE TABLE `t11` (
+ `j` datetime(6) DEFAULT '1986-09-27 03:00:00.098765'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t11;
+j
+1986-09-27 03:00:00.098765
+CREATE TABLE t12 (
+k TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+l TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+m TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+n TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+o TIMESTAMP(6) NOT NULL DEFAULT '1986-09-27 03:00:00.098765',
+p TIMESTAMP(6) NULL,
+q DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+r DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6),
+s DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6),
+t DATETIME(6) NULL,
+u DATETIME(6) DEFAULT '1986-09-27 03:00:00.098765'
+)
+SELECT * FROM t1;
+SHOW CREATE TABLE t12;
+Table Create Table
+t12 CREATE TABLE `t12` (
+ `k` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+ `l` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+ `m` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+ `n` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000' ON UPDATE CURRENT_TIMESTAMP(6),
+ `o` timestamp(6) NOT NULL DEFAULT '1986-09-27 03:00:00.098765',
+ `p` timestamp(6) NULL DEFAULT NULL,
+ `q` datetime(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+ `r` datetime(6) DEFAULT CURRENT_TIMESTAMP(6),
+ `s` datetime(6) DEFAULT NULL ON UPDATE CURRENT_TIMESTAMP(6),
+ `t` datetime(6) DEFAULT NULL,
+ `u` datetime(6) DEFAULT '1986-09-27 03:00:00.098765',
+ `a` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `b` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `c` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `d` timestamp(6) NOT NULL DEFAULT '1986-09-27 03:00:00.098765',
+ `e` timestamp(6) NULL DEFAULT NULL,
+ `f` datetime(6) DEFAULT NULL,
+ `g` datetime(6) DEFAULT NULL,
+ `h` datetime(6) DEFAULT NULL,
+ `i` datetime(6) DEFAULT NULL,
+ `j` datetime(6) DEFAULT '1986-09-27 03:00:00.098765'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12;
+# 1970-04-11 20:13:57 UTC
+SET TIMESTAMP = 8712837.164953;
+CREATE TABLE t1 (
+a DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+b DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6),
+c DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6),
+d DATETIME(6) NULL,
+e DATETIME(6) DEFAULT '1986-09-27 03:00:00.098765'
+);
+INSERT INTO t1 VALUES ();
+# 1971-01-31 20:13:57 UTC
+SET TIMESTAMP = 34200837.915736;
+CREATE TABLE t2 SELECT a FROM t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` datetime(6) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a
+1970-04-11 20:13:57.164953
+CREATE TABLE t3 SELECT b FROM t1;
+SHOW CREATE TABLE t3;
+Table Create Table
+t3 CREATE TABLE `t3` (
+ `b` datetime(6) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t3;
+b
+1970-04-11 20:13:57.164953
+CREATE TABLE t4 SELECT c FROM t1;
+SHOW CREATE TABLE t4;
+Table Create Table
+t4 CREATE TABLE `t4` (
+ `c` datetime(6) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t4;
+c
+NULL
+CREATE TABLE t5 SELECT d FROM t1;
+SHOW CREATE TABLE t5;
+Table Create Table
+t5 CREATE TABLE `t5` (
+ `d` datetime(6) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t5;
+d
+NULL
+CREATE TABLE t6 SELECT e FROM t1;
+SHOW CREATE TABLE t6;
+Table Create Table
+t6 CREATE TABLE `t6` (
+ `e` datetime(6) DEFAULT '1986-09-27 03:00:00.098765'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM t6;
+e
+1986-09-27 03:00:00.098765
+DROP TABLE t1, t2, t3, t4, t5, t6;
+#
+# Test of a CREATE TABLE SELECT that also declared columns. In this case
+# the function default should be deactivated during the execution of the
+# CREATE TABLE statement.
+#
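+# Sketch (hypothetical names): rows copied by CREATE ... SELECT get the
+# zero date; the function default only fires for later INSERTs.
+#   CREATE TABLE src ( n INT );
+#   INSERT INTO src VALUES ( 1 );
+#   CREATE TABLE dst ( ts TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) )
+#     SELECT n FROM src;
+#   SELECT ts FROM dst;  -- 0000-00-00 00:00:00.000000 for the copied row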
+# 1970-01-01 00:16:40 UTC
+SET TIMESTAMP = 1000.987654;
+CREATE TABLE t1 ( a INT );
+INSERT INTO t1 VALUES ( 1 ), ( 2 );
+CREATE TABLE t2 ( b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)) SELECT a FROM t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `b` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+ `a` int(11) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SET TIMESTAMP = 2000.876543;
+INSERT INTO t2( a ) VALUES ( 3 );
+SELECT * FROM t2;
+b a
+0000-00-00 00:00:00.000000 1
+0000-00-00 00:00:00.000000 2
+1970-01-01 00:33:20.876543 3
+DROP TABLE t1, t2;
+#
+# Test of updating a view.
+#
+CREATE TABLE t1 ( a INT, b DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6) );
+CREATE TABLE t2 ( a INT, b DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6) );
+CREATE VIEW v1 AS SELECT * FROM t1;
+SHOW CREATE VIEW v1;
+View Create View character_set_client collation_connection
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a`,`t1`.`b` AS `b` from `t1` latin1 latin1_swedish_ci
+CREATE VIEW v2 AS SELECT * FROM t2;
+SHOW CREATE VIEW v2;
+View Create View character_set_client collation_connection
+v2 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v2` AS select `t2`.`a` AS `a`,`t2`.`b` AS `b` from `t2` latin1 latin1_swedish_ci
+# 1971-01-31 20:13:57 UTC
+SET TIMESTAMP = 34200837.348564;
+INSERT INTO v1 ( a ) VALUES ( 1 );
+INSERT INTO v2 ( a ) VALUES ( 1 );
+SELECT * FROM t1;
+a b
+1 1971-01-31 20:13:57.348564
+SELECT * FROM v1;
+a b
+1 1971-01-31 20:13:57.348564
+SELECT * FROM t2;
+a b
+1 NULL
+SELECT * FROM v2;
+a b
+1 NULL
+# 1970-04-11 20:13:57 UTC
+SET TIMESTAMP = 8712837.567332;
+UPDATE v1 SET a = 2;
+UPDATE v2 SET a = 2;
+SELECT * FROM t1;
+a b
+2 1971-01-31 20:13:57.348564
+SELECT * FROM v1;
+a b
+2 1971-01-31 20:13:57.348564
+SELECT * FROM t2;
+a b
+2 1970-04-11 20:13:57.567332
+SELECT * FROM v2;
+a b
+2 1970-04-11 20:13:57.567332
+DROP VIEW v1, v2;
+DROP TABLE t1, t2;
+#
+# Test with stored procedures.
+#
+CREATE TABLE t1 (
+a INT,
+b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+c TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+d TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+e TIMESTAMP(6) NULL,
+f DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6),
+g DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6)
+);
+CREATE PROCEDURE p1() INSERT INTO test.t1( a ) VALUES ( 1 );
+CREATE PROCEDURE p2() UPDATE t1 SET a = 2 WHERE a = 1;
+# 1971-01-31 20:13:57 UTC
+SET TIMESTAMP = 34200837.876544;
+CALL p1();
+SELECT * FROM t1;
+a b c d e f g
+1 1971-01-31 20:13:57.876544 1971-01-31 20:13:57.876544 0000-00-00 00:00:00.000000 NULL 1971-01-31 20:13:57.876544 NULL
+# 1970-04-11 20:13:57 UTC
+SET TIMESTAMP = 8712837.143546;
+CALL p2();
+SELECT * FROM t1;
+a b c d e f g
+2 1970-04-11 20:13:57.143546 1971-01-31 20:13:57.876544 1970-04-11 20:13:57.143546 NULL 1971-01-31 20:13:57.876544 1970-04-11 20:13:57.143546
+DROP PROCEDURE p1;
+DROP PROCEDURE p2;
+DROP TABLE t1;
+#
+# Test with triggers.
+#
+CREATE TABLE t1 (
+a INT,
+b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+c TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+d TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+e TIMESTAMP(6) NULL,
+f DATETIME(6),
+g DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6),
+h DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6),
+i DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)
+);
+CREATE TABLE t2 ( a INT );
+CREATE TRIGGER t2_trg BEFORE INSERT ON t2 FOR EACH ROW
+BEGIN
+INSERT INTO t1 ( a ) VALUES ( 1 );
+END|
+# 1971-01-31 20:13:57 UTC
+SET TIMESTAMP = 34200837.978675;
+INSERT INTO t2 ( a ) VALUES ( 1 );
+SELECT * FROM t1;
+a b c d e f g h i
+1 1971-01-31 20:13:57.978675 1971-01-31 20:13:57.978675 0000-00-00 00:00:00.000000 NULL NULL 1971-01-31 20:13:57.978675 NULL 1971-01-31 20:13:57.978675
+DROP TRIGGER t2_trg;
+CREATE TRIGGER t2_trg BEFORE INSERT ON t2 FOR EACH ROW
+BEGIN
+UPDATE t1 SET a = 2;
+END|
+# 1970-04-11 20:13:57 UTC
+SET TIMESTAMP = 8712837.456789;
+INSERT INTO t2 ( a ) VALUES ( 1 );
+SELECT * FROM t1;
+a b c d e f g h i
+2 1970-04-11 20:13:57.456789 1971-01-31 20:13:57.978675 1970-04-11 20:13:57.456789 NULL NULL 1971-01-31 20:13:57.978675 1970-04-11 20:13:57.456789 1970-04-11 20:13:57.456789
+DROP TABLE t1, t2;
+#
+# Test where the assignment target is not a column.
+#
+CREATE TABLE t1 ( a TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6) );
+CREATE TABLE t2 ( a TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6) );
+CREATE TABLE t3 ( a TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6) );
+CREATE TABLE t4 ( a TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6) );
+CREATE VIEW v1 AS SELECT a COLLATE latin1_german1_ci AS b FROM t1;
+CREATE VIEW v2 ( b ) AS SELECT a COLLATE latin1_german1_ci FROM t2;
+CREATE VIEW v3 AS SELECT a COLLATE latin1_german1_ci AS b FROM t3;
+CREATE VIEW v4 ( b ) AS SELECT a COLLATE latin1_german1_ci FROM t4;
+INSERT INTO v1 ( b ) VALUES ( '2007-10-24 00:03:34.010203' );
+SELECT a FROM t1;
+a
+2007-10-24 00:03:34.010203
+INSERT INTO v2 ( b ) VALUES ( '2007-10-24 00:03:34.010203' );
+SELECT a FROM t2;
+a
+2007-10-24 00:03:34.010203
+INSERT INTO t3 VALUES ();
+UPDATE v3 SET b = '2007-10-24 00:03:34.010203';
+SELECT a FROM t3;
+a
+2007-10-24 00:03:34.010203
+INSERT INTO t4 VALUES ();
+UPDATE v4 SET b = '2007-10-24 00:03:34.010203';
+SELECT a FROM t4;
+a
+2007-10-24 00:03:34.010203
+DROP VIEW v1, v2, v3, v4;
+DROP TABLE t1, t2, t3, t4;
+#
+# Test of LOAD DATA/XML INFILE
+# This tests the behavior of function defaults for TIMESTAMP and DATETIME
+# columns during LOAD ... INFILE.
+# As can be seen here, a TIMESTAMP column with only ON UPDATE
+# CURRENT_TIMESTAMP will still have CURRENT_TIMESTAMP inserted on LOAD
+# ... INFILE if the value is missing. For DATETIME columns a NULL value
+# is inserted instead.
+#
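+# Condensed sketch of that rule (hypothetical table, not from the
+# recorded run):
+#   CREATE TABLE probe (
+#     ts TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00'
+#        ON UPDATE CURRENT_TIMESTAMP(6),            -- missing -> CURRENT_TIMESTAMP
+#     dt DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6) -- missing -> NULL
+#   );
+#   LOAD DATA INFILE 't3.dat' INTO TABLE probe;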
+CREATE TABLE t1 (
+a INT,
+b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+c TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+d TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+e TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+f DATETIME(6),
+g DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6),
+h DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6),
+i DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)
+);
+CREATE TABLE t2 (
+a TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+c TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+d TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+e DATETIME(6) NOT NULL,
+f DATETIME(6) NOT NULL DEFAULT '1977-01-02 12:13:14',
+g DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6) NOT NULL,
+h DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6) NOT NULL,
+i DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6) NOT NULL
+);
+SELECT 1 INTO OUTFILE 't3.dat' FROM dual;
+SELECT NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
+INTO OUTFILE 't4.dat'
+FROM dual;
+SELECT 1, 2 INTO OUTFILE 't5.dat' FROM dual;
+# Mon Aug 1 15:11:19 2011 UTC
+SET TIMESTAMP = 1312211479.918273;
+LOAD DATA INFILE 't3.dat' INTO TABLE t1;
+Warnings:
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+SELECT * FROM t1;
+a 1
+b 2011-08-01 15:11:19.918273
+c 2011-08-01 15:11:19.918273
+d 2011-08-01 15:11:19.918273
+e 2011-08-01 15:11:19.918273
+f NULL
+g NULL
+h NULL
+i NULL
+LOAD DATA INFILE 't4.dat' INTO TABLE t2;
+Warnings:
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'e' at row 1
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'f' at row 1
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'g' at row 1
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'h' at row 1
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'i' at row 1
+SELECT a FROM t2;
+a
+2011-08-01 15:11:19.918273
+SELECT b FROM t2;
+b
+2011-08-01 15:11:19.918273
+SELECT c FROM t2;
+c
+2011-08-01 15:11:19.918273
+SELECT d FROM t2;
+d
+2011-08-01 15:11:19.918273
+# As shown here, supplying a NULL value to a non-nullable
+# column with no default value results in the zero date.
+SELECT e FROM t2;
+e
+0000-00-00 00:00:00.000000
+# As shown here, supplying a NULL value to a non-nullable column with a
+# default value results in the zero date.
+SELECT f FROM t2;
+f
+0000-00-00 00:00:00.000000
+# As shown here, supplying a NULL value to a non-nullable column with a
+# default function results in the zero date.
+SELECT g FROM t2;
+g
+0000-00-00 00:00:00.000000
+# As shown here, supplying a NULL value to a non-nullable DATETIME ON
+# UPDATE CURRENT_TIMESTAMP column with no default value results in the
+# zero date.
+SELECT h FROM t2;
+h
+0000-00-00 00:00:00.000000
+SELECT i FROM t2;
+i
+0000-00-00 00:00:00.000000
+DELETE FROM t1;
+DELETE FROM t2;
+# Read the t3 file into t1.
+# This syntax causes a different code path (read_fixed_length()) to be
+# taken than the LOAD ... INTO TABLE t1 command above. The code in this
+# path is copy-pasted from the path taken by the syntax used in the
+# previous LOAD command.
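+# The two syntaxes, side by side (the field-separated form was used
+# above; the fixed-length form is executed next):
+#   LOAD DATA INFILE 't3.dat' INTO TABLE t1;  -- field-separated path
+#   LOAD DATA INFILE 't3.dat' INTO TABLE t1
+#   FIELDS TERMINATED BY '' ENCLOSED BY '';   -- read_fixed_length() path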
+LOAD DATA INFILE 't3.dat' INTO TABLE t1
+FIELDS TERMINATED BY '' ENCLOSED BY '';
+Warnings:
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+SELECT b FROM t1;
+b
+2011-08-01 15:11:19.918273
+SELECT c FROM t1;
+c
+2011-08-01 15:11:19.918273
+SELECT d FROM t1;
+d
+2011-08-01 15:11:19.918273
+SELECT e FROM t1;
+e
+2011-08-01 15:11:19.918273
+# Yes, a missing field cannot be NULL using this syntax, so it gets the
+# zero date instead. A comment in read_fixed_length() says: "No fields
+# specified in fields_vars list can be NULL in this format."
+# This appears to be by design, and is inconsistent with the LOAD DATA
+# INFILE syntax used in the previous test.
+SELECT f FROM t1;
+f
+0000-00-00 00:00:00.000000
+SELECT g FROM t1;
+g
+0000-00-00 00:00:00.000000
+# See the comment above "SELECT f FROM t1".
+SELECT h FROM t1;
+h
+0000-00-00 00:00:00.000000
+SELECT i FROM t1;
+i
+0000-00-00 00:00:00.000000
+DELETE FROM t1;
+LOAD DATA INFILE 't5.dat' INTO TABLE t1 ( a, @dummy );
+SELECT * FROM t1;
+a b c d e f g h i
+1 2011-08-01 15:11:19.918273 2011-08-01 15:11:19.918273 0000-00-00 00:00:00.000000 2011-08-01 15:11:19.918273 NULL 2011-08-01 15:11:19.918273 NULL 2011-08-01 15:11:19.918273
+SELECT @dummy;
+@dummy
+2
+DELETE FROM t1;
+LOAD DATA INFILE 't3.dat' INTO TABLE t1 ( a ) SET c = '2005-06-06 08:09:10';
+SELECT * FROM t1;
+a b c d e f g h i
+1 2011-08-01 15:11:19.918273 2005-06-06 08:09:10.000000 0000-00-00 00:00:00.000000 2011-08-01 15:11:19.918273 NULL 2011-08-01 15:11:19.918273 NULL 2011-08-01 15:11:19.918273
+DELETE FROM t1;
+LOAD DATA INFILE 't3.dat' INTO TABLE t1 ( a ) SET g = '2005-06-06 08:09:10';
+SELECT * FROM t1;
+a b c d e f g h i
+1 2011-08-01 15:11:19.918273 2011-08-01 15:11:19.918273 0000-00-00 00:00:00.000000 2011-08-01 15:11:19.918273 NULL 2005-06-06 08:09:10.000000 NULL 2011-08-01 15:11:19.918273
+DELETE FROM t1;
+# Load a static XML file
+LOAD XML INFILE '../../std_data/onerow.xml' INTO TABLE t1
+ROWS IDENTIFIED BY '<row>';
+# Missing tags are treated as NULL
+SELECT * FROM t1;
+a 1
+b 2011-08-01 15:11:19.918273
+c 2011-08-01 15:11:19.918273
+d 2011-08-01 15:11:19.918273
+e 2011-08-01 15:11:19.918273
+f NULL
+g NULL
+h NULL
+i NULL
+DROP TABLE t1, t2;
+#
+# Similar LOAD DATA tests in another form
+#
+# All of this test portion has been run on a pre-WL5874 trunk
+# (except that like_b and like_c didn't exist) and any result
+# differences are bugs.
+# Regarding like_b, its definition is the same as b's except
+# that the constant default is replaced with a function
+# default. Our expectation is that like_b behaves like b: if b
+# is set to NULL, to 0000-00-00, or to its default, then the
+# same should apply to like_b. The same goes for like_c vs c.
+# Mon Aug 1 15:11:19 2011 UTC
+SET TIMESTAMP = 1312211479.089786;
+SELECT 1 INTO OUTFILE "file1.dat" FROM dual;
+SELECT NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
+INTO OUTFILE "file2.dat" FROM dual;
+# Too short row
+CREATE TABLE t1 (
+dummy INT,
+a DATETIME(6) NULL DEFAULT NULL,
+b DATETIME(6) NULL DEFAULT "2011-11-18",
+like_b DATETIME(6) NULL DEFAULT CURRENT_TIMESTAMP(6),
+c DATETIME(6) NOT NULL DEFAULT "2011-11-18",
+like_c DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+d TIMESTAMP(6) NULL DEFAULT "2011-05-03" ON UPDATE CURRENT_TIMESTAMP(6),
+e TIMESTAMP(6) NOT NULL DEFAULT "2011-05-03",
+f TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+g TIMESTAMP(6) NULL DEFAULT NULL,
+h INT NULL,
+i INT NOT NULL DEFAULT 42
+);
+# There is no promotion
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `dummy` int(11) DEFAULT NULL,
+ `a` datetime(6) DEFAULT NULL,
+ `b` datetime(6) DEFAULT '2011-11-18 00:00:00.000000',
+ `like_b` datetime(6) DEFAULT CURRENT_TIMESTAMP(6),
+ `c` datetime(6) NOT NULL DEFAULT '2011-11-18 00:00:00.000000',
+ `like_c` datetime(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+ `d` timestamp(6) NULL DEFAULT '2011-05-03 00:00:00.000000' ON UPDATE CURRENT_TIMESTAMP(6),
+ `e` timestamp(6) NOT NULL DEFAULT '2011-05-03 00:00:00.000000',
+ `f` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+ `g` timestamp(6) NULL DEFAULT NULL,
+ `h` int(11) DEFAULT NULL,
+ `i` int(11) NOT NULL DEFAULT '42'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+LOAD DATA INFILE "file1.dat" INTO table t1;
+Warnings:
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+# It is strange that "like_b" gets NULL when "b" gets 0. But
+# this is consistent with how "a" gets NULL when "b" gets 0,
+# with how "g" gets NULL when "d" gets 0, and with how "h" gets
+# NULL when "i" gets 0. Looks like "DEFAULT
+# <non-NULL-constant>" is changed to 0, whereas DEFAULT NULL
+# and DEFAULT NOW are changed to NULL.
+SELECT * FROM t1;
+dummy 1
+a NULL
+b 0000-00-00 00:00:00.000000
+like_b NULL
+c 0000-00-00 00:00:00.000000
+like_c 0000-00-00 00:00:00.000000
+d 0000-00-00 00:00:00.000000
+e 2011-08-01 15:11:19.089786
+f 2011-08-01 15:11:19.089786
+g NULL
+h NULL
+i 0
+delete from t1;
+alter table t1
+modify f TIMESTAMP NULL default CURRENT_TIMESTAMP;
+# There is no promotion
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `dummy` int(11) DEFAULT NULL,
+ `a` datetime(6) DEFAULT NULL,
+ `b` datetime(6) DEFAULT '2011-11-18 00:00:00.000000',
+ `like_b` datetime(6) DEFAULT CURRENT_TIMESTAMP(6),
+ `c` datetime(6) NOT NULL DEFAULT '2011-11-18 00:00:00.000000',
+ `like_c` datetime(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+ `d` timestamp(6) NULL DEFAULT '2011-05-03 00:00:00.000000' ON UPDATE CURRENT_TIMESTAMP(6),
+ `e` timestamp(6) NOT NULL DEFAULT '2011-05-03 00:00:00.000000',
+ `f` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
+ `g` timestamp(6) NULL DEFAULT NULL,
+ `h` int(11) DEFAULT NULL,
+ `i` int(11) NOT NULL DEFAULT '42'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+LOAD DATA INFILE "file1.dat" INTO table t1;
+Warnings:
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+Warning 1261 Row 1 doesn't contain data for all columns
+SELECT * FROM t1;
+dummy 1
+a NULL
+b 0000-00-00 00:00:00.000000
+like_b NULL
+c 0000-00-00 00:00:00.000000
+like_c 0000-00-00 00:00:00.000000
+d 0000-00-00 00:00:00.000000
+e 2011-08-01 15:11:19.089786
+f NULL
+g NULL
+h NULL
+i 0
+delete from t1;
+drop table t1;
+# Conclusion derived from trunk's results:
+# DATETIME DEFAULT <non-NULL-constant> (b,c) gets 0000-00-00,
+# DATETIME DEFAULT NULL (a) gets NULL,
+# TIMESTAMP NULL DEFAULT <non-NULL-constant> (d) gets 0000-00-00,
+# TIMESTAMP NULL DEFAULT NULL (g) gets NULL,
+# TIMESTAMP NULL DEFAULT NOW (f after ALTER) gets NULL,
+# TIMESTAMP NOT NULL (f before ALTER, e) gets NOW.
+### Loading NULL ###
+CREATE TABLE t1 (
+dummy INT,
+a DATETIME(6) NULL DEFAULT NULL,
+b DATETIME(6) NULL DEFAULT "2011-11-18",
+like_b DATETIME(6) NULL DEFAULT CURRENT_TIMESTAMP(6),
+c DATETIME(6) NOT NULL DEFAULT "2011-11-18",
+like_c DATETIME(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+d TIMESTAMP(6) NULL DEFAULT "2011-05-03" ON UPDATE CURRENT_TIMESTAMP(6),
+e TIMESTAMP(6) NOT NULL DEFAULT "2011-05-03",
+f TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+g TIMESTAMP(6) NULL DEFAULT NULL,
+h INT NULL,
+i INT NOT NULL DEFAULT 42
+);
+# There is no promotion
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `dummy` int(11) DEFAULT NULL,
+ `a` datetime(6) DEFAULT NULL,
+ `b` datetime(6) DEFAULT '2011-11-18 00:00:00.000000',
+ `like_b` datetime(6) DEFAULT CURRENT_TIMESTAMP(6),
+ `c` datetime(6) NOT NULL DEFAULT '2011-11-18 00:00:00.000000',
+ `like_c` datetime(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+ `d` timestamp(6) NULL DEFAULT '2011-05-03 00:00:00.000000' ON UPDATE CURRENT_TIMESTAMP(6),
+ `e` timestamp(6) NOT NULL DEFAULT '2011-05-03 00:00:00.000000',
+ `f` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+ `g` timestamp(6) NULL DEFAULT NULL,
+ `h` int(11) DEFAULT NULL,
+ `i` int(11) NOT NULL DEFAULT '42'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+LOAD DATA INFILE "file2.dat" INTO table t1;
+Warnings:
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'c' at row 1
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'like_c' at row 1
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'i' at row 1
+SELECT * FROM t1;
+dummy NULL
+a NULL
+b NULL
+like_b NULL
+c 0000-00-00 00:00:00.000000
+like_c 0000-00-00 00:00:00.000000
+d NULL
+e 2011-08-01 15:11:19.089786
+f 2011-08-01 15:11:19.089786
+g NULL
+h NULL
+i 0
+delete from t1;
+alter table t1
+modify f TIMESTAMP NULL default CURRENT_TIMESTAMP;
+# There is no promotion
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `dummy` int(11) DEFAULT NULL,
+ `a` datetime(6) DEFAULT NULL,
+ `b` datetime(6) DEFAULT '2011-11-18 00:00:00.000000',
+ `like_b` datetime(6) DEFAULT CURRENT_TIMESTAMP(6),
+ `c` datetime(6) NOT NULL DEFAULT '2011-11-18 00:00:00.000000',
+ `like_c` datetime(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+ `d` timestamp(6) NULL DEFAULT '2011-05-03 00:00:00.000000' ON UPDATE CURRENT_TIMESTAMP(6),
+ `e` timestamp(6) NOT NULL DEFAULT '2011-05-03 00:00:00.000000',
+ `f` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
+ `g` timestamp(6) NULL DEFAULT NULL,
+ `h` int(11) DEFAULT NULL,
+ `i` int(11) NOT NULL DEFAULT '42'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+LOAD DATA INFILE "file2.dat" INTO table t1;
+Warnings:
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'c' at row 1
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'like_c' at row 1
+Warning 1263 Column set to default value; NULL supplied to NOT NULL column 'i' at row 1
+SELECT * FROM t1;
+dummy NULL
+a NULL
+b NULL
+like_b NULL
+c 0000-00-00 00:00:00.000000
+like_c 0000-00-00 00:00:00.000000
+d NULL
+e 2011-08-01 15:11:19.089786
+f NULL
+g NULL
+h NULL
+i 0
+delete from t1;
+# Conclusion derived from trunk's results:
+# DATETIME NULL (a,b) gets NULL,
+# DATETIME NOT NULL (c) gets 0000-00-00,
+# TIMESTAMP NULL (d,f,g) gets NULL,
+# TIMESTAMP NOT NULL (e) gets NOW.
+drop table t1;
+#
+# Test of updatable views with check options. The option can be violated
+# by ON UPDATE updates, which is very strange, as this offers a loophole
+# in the integrity check.
+#
+SET TIME_ZONE = "+03:00";
+# 1970-01-01 03:16:40
+SET TIMESTAMP = 1000.123456;
+CREATE TABLE t1 ( a INT, b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)) ENGINE = INNODB;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+INSERT INTO t1 ( a ) VALUES ( 1 );
+SELECT * FROM t1;
+a b
+1 1970-01-01 03:16:40.123456
+CREATE VIEW v1 AS SELECT * FROM t1 WHERE b <= '1970-01-01 03:16:40.123456'
+WITH CHECK OPTION;
+SELECT * FROM v1;
+a b
+1 1970-01-01 03:16:40.123456
+# 1970-01-01 03:33:20
+SET TIMESTAMP = 2000.000234;
+UPDATE v1 SET a = 2;
+ERROR HY000: CHECK OPTION failed 'test.v1'
+SELECT * FROM t1;
+a b
+1 1970-01-01 03:16:40.123456
+DROP VIEW v1;
+DROP TABLE t1;
+CREATE TABLE t1 (
+a TIMESTAMP(6) NOT NULL DEFAULT '1973-08-14 09:11:22.089786' ON UPDATE CURRENT_TIMESTAMP(6),
+c INT KEY
+);
+# 1973-08-14 09:11:22 UTC
+SET TIMESTAMP = 114167482.534231;
+INSERT INTO t1 ( c ) VALUES ( 1 );
+CREATE VIEW v1 AS
+SELECT *
+FROM t1
+WHERE a >= '1973-08-14 09:11:22'
+WITH LOCAL CHECK OPTION;
+SELECT * FROM v1;
+a c
+1973-08-14 09:11:22.089786 1
+SET TIMESTAMP = 1.126789;
+INSERT INTO v1 ( c ) VALUES ( 1 ) ON DUPLICATE KEY UPDATE c = 2;
+ERROR HY000: CHECK OPTION failed 'test.v1'
+SELECT * FROM v1;
+a c
+1973-08-14 09:11:22.089786 1
+DROP VIEW v1;
+DROP TABLE t1;
+#
+# Bug 13095459 - MULTI-TABLE UPDATE MODIFIES A ROW TWICE
+#
+CREATE TABLE t1 (
+a INT,
+b INT,
+ts TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
+PRIMARY KEY ( a, ts )
+) ENGINE = INNODB;
+INSERT INTO t1( a, b, ts ) VALUES ( 1, 0, '2000-09-28 17:44:34' );
+CREATE TABLE t2 ( a INT ) ENGINE = INNODB;
+INSERT INTO t2 VALUES ( 1 );
+UPDATE t1 STRAIGHT_JOIN t2
+SET t1.b = t1.b + 1
+WHERE t1.a = 1 AND t1.ts >= '2000-09-28 00:00:00';
+SELECT b FROM t1;
+b
+1
+DROP TABLE t1, t2;
+#
+# Bug#11745578: 17392: ALTER TABLE ADD COLUMN TIMESTAMP DEFAULT
+# CURRENT_TIMESTAMP INSERTS ZERO
+#
+SET timestamp = 1000;
+CREATE TABLE t1 ( b INT );
+INSERT INTO t1 VALUES (1);
+ALTER TABLE t1 ADD COLUMN a6 DATETIME(6) DEFAULT NOW(6) ON UPDATE NOW(6) FIRST;
+ALTER TABLE t1 ADD COLUMN a5 DATETIME(6) DEFAULT NOW(6) FIRST;
+ALTER TABLE t1 ADD COLUMN a4 DATETIME(6) ON UPDATE NOW(6) FIRST;
+ALTER TABLE t1 ADD COLUMN a3 TIMESTAMP(6) NOT NULL DEFAULT NOW(6) ON UPDATE NOW(6) FIRST;
+ALTER TABLE t1 ADD COLUMN a2 TIMESTAMP(6) NOT NULL DEFAULT NOW(6) FIRST;
+ALTER TABLE t1 ADD COLUMN a1 TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE NOW(6) FIRST;
+ALTER TABLE t1 ADD COLUMN c1 TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE NOW(6) AFTER b;
+ALTER TABLE t1 ADD COLUMN c2 TIMESTAMP(6) NOT NULL DEFAULT NOW(6) AFTER c1;
+ALTER TABLE t1 ADD COLUMN c3 TIMESTAMP(6) NOT NULL DEFAULT NOW(6) ON UPDATE NOW(6) AFTER c2;
+ALTER TABLE t1 ADD COLUMN c4 DATETIME(6) ON UPDATE NOW(6) AFTER c3;
+ALTER TABLE t1 ADD COLUMN c5 DATETIME(6) DEFAULT NOW(6) AFTER c4;
+ALTER TABLE t1 ADD COLUMN c6 DATETIME(6) DEFAULT NOW(6) ON UPDATE NOW(6) AFTER c5;
+SELECT * FROM t1;
+a1 a2 a3 a4 a5 a6 b c1 c2 c3 c4 c5 c6
+0000-00-00 00:00:00.000000 1970-01-01 03:16:40.000000 1970-01-01 03:16:40.000000 NULL 1970-01-01 03:16:40.000000 1970-01-01 03:16:40.000000 1 0000-00-00 00:00:00.000000 1970-01-01 03:16:40.000000 1970-01-01 03:16:40.000000 NULL 1970-01-01 03:16:40.000000 1970-01-01 03:16:40.000000
+DROP TABLE t1;
+CREATE TABLE t1 ( a TIMESTAMP(6) NOT NULL DEFAULT NOW(6) ON UPDATE CURRENT_TIMESTAMP(6), b DATETIME(6) DEFAULT NOW(6) );
+INSERT INTO t1 VALUES ();
+SET timestamp = 1000000000;
+ALTER TABLE t1 MODIFY COLUMN a TIMESTAMP(3) DEFAULT CURRENT_TIMESTAMP(3);
+ALTER TABLE t1 MODIFY COLUMN b DATETIME(3) DEFAULT CURRENT_TIMESTAMP(3);
+SELECT * FROM t1;
+a b
+1970-01-01 03:16:40.000 1970-01-01 03:16:40.000
+DROP TABLE t1;
+CREATE TABLE t1 (
+a TIMESTAMP(6) NOT NULL DEFAULT '1999-12-01 11:22:33' ON UPDATE CURRENT_TIMESTAMP(6),
+b DATETIME(6) DEFAULT '1999-12-01 11:22:33'
+);
+INSERT INTO t1 VALUES ();
+ALTER TABLE t1 MODIFY COLUMN a TIMESTAMP(6) DEFAULT NOW(6);
+ALTER TABLE t1 MODIFY COLUMN b DATETIME(6) DEFAULT NOW(6);
+INSERT INTO t1 VALUES ();
+SELECT * FROM t1;
+a b
+1999-12-01 11:22:33.000000 1999-12-01 11:22:33.000000
+2001-09-09 04:46:40.000000 2001-09-09 04:46:40.000000
+DROP TABLE t1;
diff --git a/mysql-test/r/function_defaults_notembedded.result b/mysql-test/r/function_defaults_notembedded.result
new file mode 100644
index 00000000000..c54ae14aef4
--- /dev/null
+++ b/mysql-test/r/function_defaults_notembedded.result
@@ -0,0 +1,171 @@
+#
+# Test of function defaults for non-embedded server.
+#
+#
+# Function defaults run 1. No microsecond precision.
+#
+SET TIME_ZONE = "+00:00";
+#
+# Test of INSERT DELAYED ... SET ...
+#
+# 2011-04-19 08:02:40 UTC
+SET TIMESTAMP = 1303200160.123456;
+CREATE TABLE t1 ( a INT, b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP);
+INSERT DELAYED INTO t1 SET a = 1;
+FLUSH TABLE t1;
+SELECT * FROM t1;
+a b
+1 2011-04-19 08:02:40
+SELECT * FROM t1 WHERE b = 0;
+a b
+INSERT DELAYED INTO t1 SET a = 2, b = '1980-01-02 10:20:30.405060';
+FLUSH TABLE t1;
+SELECT * FROM t1;
+a b
+1 2011-04-19 08:02:40
+2 1980-01-02 10:20:30
+DROP TABLE t1;
+#
+# Test of INSERT DELAYED ... VALUES ...
+#
+# 2011-04-19 08:04:01 UTC
+SET TIMESTAMP = 1303200241.234567;
+CREATE TABLE t1 ( a INT, b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP);
+INSERT DELAYED INTO t1 ( a ) VALUES (1);
+FLUSH TABLE t1;
+SELECT * FROM t1;
+a b
+1 2011-04-19 08:04:01
+INSERT DELAYED INTO t1 VALUES (2, '1977-12-19 12:34:56.789123');
+FLUSH TABLE t1;
+SELECT * FROM t1;
+a b
+1 2011-04-19 08:04:01
+2 1977-12-19 12:34:56
+DROP TABLE t1;
+#
+# Test of a delayed insert handler servicing two insert operations
+# with different sets of active defaults.
+#
+CREATE TABLE t1 ( a INT, b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP);
+# 2011-04-19 08:04:01 UTC
+SET TIMESTAMP = 1303200241.345678;
+SET debug_sync = 'before_write_delayed SIGNAL parked WAIT_FOR go';
+INSERT DELAYED INTO t1 ( a ) VALUES (1), (2), (3);
+SET debug_sync = 'now WAIT_FOR parked';
+# 2011-04-19 08:04:01 UTC
+SET TIME_ZONE="+03:00";
+SET TIMESTAMP = 1303200241.456789;
+INSERT DELAYED INTO t1 ( a, b ) VALUES (4, '1977-12-19 12:34:56.789123'), (5, '1977-12-19 12:34:57.891234'), (6, '1977-12-19 12:34:58.912345');
+SET debug_sync = 'now SIGNAL go';
+SELECT * FROM t1;
+a b
+1 2011-04-19 08:04:01
+2 2011-04-19 08:04:01
+3 2011-04-19 08:04:01
+4 1977-12-19 09:34:56
+5 1977-12-19 09:34:57
+6 1977-12-19 09:34:58
+DROP TABLE t1;
+#
+# Test of early activation of function defaults.
+#
+CREATE TABLE t1 ( a INT, b TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP);
+SET TIMESTAMP = 1317235172.987654;
+INSERT DELAYED INTO t1 ( a ) VALUES (1), (2), (3);
+SET TIMESTAMP = 385503754.876543;
+INSERT DELAYED INTO t1 ( a ) VALUES (4), (5), (6);
+FLUSH TABLE t1;
+SELECT * FROM t1;
+a b
+1 2011-09-28 18:39:32
+2 2011-09-28 18:39:32
+3 2011-09-28 18:39:32
+4 1982-03-20 20:22:34
+5 1982-03-20 20:22:34
+6 1982-03-20 20:22:34
+DROP TABLE t1;
+#
+# Function defaults run 2. Six-digit scale for fractional seconds precision.
+#
+SET TIME_ZONE = "+00:00";
+#
+# Test of INSERT DELAYED ... SET ...
+#
+# 2011-04-19 08:02:40 UTC
+SET TIMESTAMP = 1303200160.123456;
+CREATE TABLE t1 ( a INT, b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6));
+INSERT DELAYED INTO t1 SET a = 1;
+FLUSH TABLE t1;
+SELECT * FROM t1;
+a b
+1 2011-04-19 08:02:40.123456
+SELECT * FROM t1 WHERE b = 0;
+a b
+INSERT DELAYED INTO t1 SET a = 2, b = '1980-01-02 10:20:30.405060';
+FLUSH TABLE t1;
+SELECT * FROM t1;
+a b
+1 2011-04-19 08:02:40.123456
+2 1980-01-02 10:20:30.405060
+DROP TABLE t1;
+#
+# Test of INSERT DELAYED ... VALUES ...
+#
+# 2011-04-19 08:04:01 UTC
+SET TIMESTAMP = 1303200241.234567;
+CREATE TABLE t1 ( a INT, b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6));
+INSERT DELAYED INTO t1 ( a ) VALUES (1);
+FLUSH TABLE t1;
+SELECT * FROM t1;
+a b
+1 2011-04-19 08:04:01.234567
+INSERT DELAYED INTO t1 VALUES (2, '1977-12-19 12:34:56.789123');
+FLUSH TABLE t1;
+SELECT * FROM t1;
+a b
+1 2011-04-19 08:04:01.234567
+2 1977-12-19 12:34:56.789123
+DROP TABLE t1;
+#
+# Test of a delayed insert handler servicing two insert operations
+# with different sets of active defaults.
+#
+CREATE TABLE t1 ( a INT, b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6));
+# 2011-04-19 08:04:01 UTC
+SET TIMESTAMP = 1303200241.345678;
+SET debug_sync = 'before_write_delayed SIGNAL parked WAIT_FOR go';
+INSERT DELAYED INTO t1 ( a ) VALUES (1), (2), (3);
+SET debug_sync = 'now WAIT_FOR parked';
+# 2011-04-19 08:04:01 UTC
+SET TIME_ZONE="+03:00";
+SET TIMESTAMP = 1303200241.456789;
+INSERT DELAYED INTO t1 ( a, b ) VALUES (4, '1977-12-19 12:34:56.789123'), (5, '1977-12-19 12:34:57.891234'), (6, '1977-12-19 12:34:58.912345');
+SET debug_sync = 'now SIGNAL go';
+SELECT * FROM t1;
+a b
+1 2011-04-19 08:04:01.345678
+2 2011-04-19 08:04:01.345678
+3 2011-04-19 08:04:01.345678
+4 1977-12-19 09:34:56.789123
+5 1977-12-19 09:34:57.891234
+6 1977-12-19 09:34:58.912345
+DROP TABLE t1;
+#
+# Test of early activation of function defaults.
+#
+CREATE TABLE t1 ( a INT, b TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6));
+SET TIMESTAMP = 1317235172.987654;
+INSERT DELAYED INTO t1 ( a ) VALUES (1), (2), (3);
+SET TIMESTAMP = 385503754.876543;
+INSERT DELAYED INTO t1 ( a ) VALUES (4), (5), (6);
+FLUSH TABLE t1;
+SELECT * FROM t1;
+a b
+1 2011-09-28 18:39:32.987654
+2 2011-09-28 18:39:32.987654
+3 2011-09-28 18:39:32.987654
+4 1982-03-20 20:22:34.876543
+5 1982-03-20 20:22:34.876543
+6 1982-03-20 20:22:34.876543
+DROP TABLE t1;
diff --git a/mysql-test/r/group_by.result b/mysql-test/r/group_by.result
index 9fb21c3d828..1f2bbc470dc 100644
--- a/mysql-test/r/group_by.result
+++ b/mysql-test/r/group_by.result
@@ -2246,3 +2246,48 @@ i GROUP_CONCAT( d1, d2 ORDER BY d1, d2 )
2 22.2
NULL 11.1,22.2
DROP TABLE t1;
+#
+# Bug #58782
+# Missing rows with SELECT .. WHERE .. IN subquery
+# with full GROUP BY and no aggregates
+#
+CREATE TABLE t1 (
+pk INT NOT NULL,
+col_int_nokey INT,
+PRIMARY KEY (pk)
+);
+INSERT INTO t1 VALUES (10,7);
+INSERT INTO t1 VALUES (11,1);
+INSERT INTO t1 VALUES (12,5);
+INSERT INTO t1 VALUES (13,3);
+SELECT pk AS field1, col_int_nokey AS field2
+FROM t1
+WHERE col_int_nokey > 0
+GROUP BY field1, field2;
+field1 field2
+10 7
+11 1
+12 5
+13 3
+CREATE TABLE where_subselect
+SELECT pk AS field1, col_int_nokey AS field2
+FROM t1
+WHERE col_int_nokey > 0
+GROUP BY field1, field2
+;
+SELECT *
+FROM where_subselect
+WHERE (field1, field2) IN (
+SELECT pk AS field1, col_int_nokey AS field2
+FROM t1
+WHERE col_int_nokey > 0
+GROUP BY field1, field2
+);
+field1 field2
+10 7
+11 1
+12 5
+13 3
+DROP TABLE t1;
+DROP TABLE where_subselect;
+# End of Bug #58782
diff --git a/mysql-test/r/have_debug_sync.require b/mysql-test/r/have_debug_sync.require
deleted file mode 100644
index c2090bc5657..00000000000
--- a/mysql-test/r/have_debug_sync.require
+++ /dev/null
@@ -1,2 +0,0 @@
-debug_sync
-1
diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result
index 98d16bf713c..a30fe668490 100644
--- a/mysql-test/r/information_schema.result
+++ b/mysql-test/r/information_schema.result
@@ -81,6 +81,7 @@ TRIGGERS
USER_PRIVILEGES
USER_STATISTICS
VIEWS
+column_stats
columns_priv
db
event
@@ -91,6 +92,7 @@ help_keyword
help_relation
help_topic
host
+index_stats
plugin
proc
procs_priv
@@ -102,6 +104,7 @@ t2
t3
t4
t5
+table_stats
tables_priv
time_zone
time_zone_leap_second
@@ -120,6 +123,7 @@ TABLE_CONSTRAINTS TABLE_CONSTRAINTS
TABLE_PRIVILEGES TABLE_PRIVILEGES
TABLE_STATISTICS TABLE_STATISTICS
TRIGGERS TRIGGERS
+table_stats table_stats
tables_priv tables_priv
time_zone time_zone
time_zone_leap_second time_zone_leap_second
@@ -141,6 +145,7 @@ TABLE_CONSTRAINTS TABLE_CONSTRAINTS
TABLE_PRIVILEGES TABLE_PRIVILEGES
TABLE_STATISTICS TABLE_STATISTICS
TRIGGERS TRIGGERS
+table_stats table_stats
tables_priv tables_priv
time_zone time_zone
time_zone_leap_second time_zone_leap_second
@@ -162,6 +167,7 @@ TABLE_CONSTRAINTS TABLE_CONSTRAINTS
TABLE_PRIVILEGES TABLE_PRIVILEGES
TABLE_STATISTICS TABLE_STATISTICS
TRIGGERS TRIGGERS
+table_stats table_stats
tables_priv tables_priv
time_zone time_zone
time_zone_leap_second time_zone_leap_second
diff --git a/mysql-test/r/information_schema_all_engines.result b/mysql-test/r/information_schema_all_engines.result
index 4dac9a3e53c..455f2e36ffc 100644
--- a/mysql-test/r/information_schema_all_engines.result
+++ b/mysql-test/r/information_schema_all_engines.result
@@ -418,4 +418,4 @@ Wildcard: inf_rmation_schema
SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND table_name<>'ndb_binlog_index' AND table_name<>'ndb_apply_status' GROUP BY TABLE_SCHEMA;
table_schema count(*)
information_schema 58
-mysql 23
+mysql 26
diff --git a/mysql-test/r/innodb_mysql_sync.result b/mysql-test/r/innodb_mysql_sync.result
index 58254412c5b..044e582ceb6 100644
--- a/mysql-test/r/innodb_mysql_sync.result
+++ b/mysql-test/r/innodb_mysql_sync.result
@@ -18,10 +18,10 @@ SET DEBUG_SYNC='now SIGNAL table_altered';
# Complete optimization
Table Op Msg_type Msg_text
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
-test.t1 optimize error Got error -1 from storage engine
+test.t1 optimize error Got error -1 "Internal error < 0 (Not system error)" from storage engine
test.t1 optimize status Operation failed
Warnings:
-Error 1030 Got error -1 from storage engine
+Error 1030 Got error -1 "Internal error < 0 (Not system error)" from storage engine
DROP TABLE t1;
SET DEBUG_SYNC='RESET';
#
diff --git a/mysql-test/r/is_debug_build.require b/mysql-test/r/is_debug_build.require
deleted file mode 100644
index 4d77bcdc1ed..00000000000
--- a/mysql-test/r/is_debug_build.require
+++ /dev/null
@@ -1,2 +0,0 @@
-instr(version(), "debug") > 0
-1
diff --git a/mysql-test/r/loaddata.result b/mysql-test/r/loaddata.result
index 328d26eec72..932c1c76027 100644
--- a/mysql-test/r/loaddata.result
+++ b/mysql-test/r/loaddata.result
@@ -5,8 +5,8 @@ Warnings:
Warning 1265 Data truncated for column 'a' at row 1
Warning 1265 Data truncated for column 'c' at row 1
Warning 1265 Data truncated for column 'd' at row 1
-Warning 1264 Out of range value for column 'a' at row 2
-Warning 1264 Out of range value for column 'b' at row 2
+Warning 1265 Data truncated for column 'a' at row 2
+Warning 1265 Data truncated for column 'b' at row 2
Warning 1265 Data truncated for column 'd' at row 2
load data infile '../../std_data/loaddata1.dat' into table t1 fields terminated by ',' IGNORE 2 LINES;
SELECT * from t1;
@@ -20,7 +20,7 @@ load data infile '../../std_data/loaddata1.dat' into table t1 fields terminated
Warnings:
Warning 1265 Data truncated for column 'c' at row 1
Warning 1265 Data truncated for column 'd' at row 1
-Warning 1264 Out of range value for column 'b' at row 2
+Warning 1265 Data truncated for column 'b' at row 2
Warning 1265 Data truncated for column 'd' at row 2
SELECT * from t1;
a b c d
diff --git a/mysql-test/r/log_slow.result b/mysql-test/r/log_slow.result
index 75e92e7a0b5..76cf45631bd 100644
--- a/mysql-test/r/log_slow.result
+++ b/mysql-test/r/log_slow.result
@@ -44,7 +44,7 @@ select @@log_slow_verbosity;
innodb
show fields from mysql.slow_log;
Field Type Null Key Default Extra
-start_time timestamp(6) NO CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP
+start_time timestamp(6) NO CURRENT_TIMESTAMP(6) on update CURRENT_TIMESTAMP
user_host mediumtext NO NULL
query_time time(6) NO NULL
lock_time time(6) NO NULL
diff --git a/mysql-test/r/log_tables.result b/mysql-test/r/log_tables.result
index 62775f3db4d..dd3bee0ac88 100644
--- a/mysql-test/r/log_tables.result
+++ b/mysql-test/r/log_tables.result
@@ -53,7 +53,7 @@ ERROR HY000: You can't use locks with log tables.
show create table mysql.general_log;
Table Create Table
general_log CREATE TABLE `general_log` (
- `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
`thread_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
@@ -62,7 +62,7 @@ general_log CREATE TABLE `general_log` (
) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='General log'
show fields from mysql.general_log;
Field Type Null Key Default Extra
-event_time timestamp(6) NO CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP
+event_time timestamp(6) NO CURRENT_TIMESTAMP(6) on update CURRENT_TIMESTAMP
user_host mediumtext NO NULL
thread_id int(11) NO NULL
server_id int(10) unsigned NO NULL
@@ -71,7 +71,7 @@ argument mediumtext NO NULL
show create table mysql.slow_log;
Table Create Table
slow_log CREATE TABLE `slow_log` (
- `start_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `start_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
`query_time` time(6) NOT NULL,
`lock_time` time(6) NOT NULL,
@@ -85,7 +85,7 @@ slow_log CREATE TABLE `slow_log` (
) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log'
show fields from mysql.slow_log;
Field Type Null Key Default Extra
-start_time timestamp(6) NO CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP
+start_time timestamp(6) NO CURRENT_TIMESTAMP(6) on update CURRENT_TIMESTAMP
user_host mediumtext NO NULL
query_time time(6) NO NULL
lock_time time(6) NO NULL
@@ -164,7 +164,7 @@ set global slow_query_log='OFF';
show create table mysql.general_log;
Table Create Table
general_log CREATE TABLE `general_log` (
- `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
`thread_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
@@ -174,7 +174,7 @@ general_log CREATE TABLE `general_log` (
show create table mysql.slow_log;
Table Create Table
slow_log CREATE TABLE `slow_log` (
- `start_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `start_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
`query_time` time(6) NOT NULL,
`lock_time` time(6) NOT NULL,
@@ -191,7 +191,7 @@ alter table mysql.slow_log engine=myisam;
show create table mysql.general_log;
Table Create Table
general_log CREATE TABLE `general_log` (
- `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
`thread_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
@@ -201,7 +201,7 @@ general_log CREATE TABLE `general_log` (
show create table mysql.slow_log;
Table Create Table
slow_log CREATE TABLE `slow_log` (
- `start_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `start_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
`query_time` time(6) NOT NULL,
`lock_time` time(6) NOT NULL,
diff --git a/mysql-test/r/log_tables_upgrade.result b/mysql-test/r/log_tables_upgrade.result
index 5ed59eecc31..66194267380 100644
--- a/mysql-test/r/log_tables_upgrade.result
+++ b/mysql-test/r/log_tables_upgrade.result
@@ -19,6 +19,7 @@ mtr
mtr.global_suppressions OK
mtr.test_suppressions OK
mysql
+mysql.column_stats OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
@@ -28,6 +29,7 @@ mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
mysql.host OK
+mysql.index_stats OK
mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
@@ -35,6 +37,7 @@ mysql.procs_priv OK
mysql.proxies_priv OK
mysql.renamed_general_log OK
mysql.servers OK
+mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
diff --git a/mysql-test/r/lowercase_table2.result b/mysql-test/r/lowercase_table2.result
index abd16641b56..c478d792e52 100644
--- a/mysql-test/r/lowercase_table2.result
+++ b/mysql-test/r/lowercase_table2.result
@@ -272,7 +272,7 @@ Database Table In_use Name_locked
test t_bug44738_uppercase 0 0
# So attempt to create table with the same name should fail.
create table t_bug44738_UPPERCASE (i int);
-ERROR HY000: Can't find file: 't_bug44738_uppercase' (errno: 2)
+ERROR HY000: Can't find file: 't_bug44738_uppercase' (errno: 2 "No such file or directory")
# And should succeed after FLUSH TABLES.
flush tables;
create table t_bug44738_UPPERCASE (i int);
diff --git a/mysql-test/r/mdev-504.result b/mysql-test/r/mdev-504.result
new file mode 100644
index 00000000000..e178127cf2a
--- /dev/null
+++ b/mysql-test/r/mdev-504.result
@@ -0,0 +1,21 @@
+CREATE TABLE A (
+pk INTEGER AUTO_INCREMENT PRIMARY KEY,
+fdate DATE
+) ENGINE=MyISAM;
+CREATE PROCEDURE p_analyze()
+BEGIN
+DECLARE attempts INTEGER DEFAULT 100;
+wl_loop: WHILE attempts > 0 DO
+ANALYZE TABLE A;
+SET attempts = attempts - 1;
+END WHILE wl_loop;
+END |
+CREATE FUNCTION rnd3() RETURNS INT
+BEGIN
+RETURN ROUND(3 * RAND() + 0.5);
+END |
+SET GLOBAL use_stat_tables = PREFERABLY;
+DROP TABLE A;
+DROP PROCEDURE p_analyze;
+DROP FUNCTION rnd3;
+SET GLOBAL use_stat_tables = DEFAULT;
diff --git a/mysql-test/r/myisam-system.result b/mysql-test/r/myisam-system.result
index b3ba8066f5c..924e7885814 100644
--- a/mysql-test/r/myisam-system.result
+++ b/mysql-test/r/myisam-system.result
@@ -2,7 +2,7 @@ drop table if exists t1,t2;
create table t1 (a int) engine=myisam;
drop table if exists t1;
Warnings:
-Warning 2 Can't find file: 't1' (errno: 2)
+Warning 2 Can't find file: 't1' (errno: 2 "No such file or directory")
create table t1 (a int) engine=myisam;
drop table t1;
Got one of the listed errors
diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result
index 64c61d4ee36..6c0826775ad 100644
--- a/mysql-test/r/myisam.result
+++ b/mysql-test/r/myisam.result
@@ -1697,7 +1697,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1
drop table t1;
create table t1 (v varchar(65535));
-ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 65535. You have to change some columns to TEXT or BLOBs
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 65535. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
set storage_engine=MyISAM;
set @save_concurrent_insert=@@concurrent_insert;
set global concurrent_insert=1;
diff --git a/mysql-test/r/myisampack.result b/mysql-test/r/myisampack.result
index 56f61ccdf47..2cefa385048 100644
--- a/mysql-test/r/myisampack.result
+++ b/mysql-test/r/myisampack.result
@@ -113,7 +113,7 @@ ERROR 42S02: Table 'test.t3' doesn't exist
# ===== myisampack.4 =====
#Tests the myisampack join operation with an existing destination .frm,.MYI,.MDI
#the command should fail with exit status 2
-myisampack: Can't create/write to file (Errcode: 17)
+myisampack: Can't create/write to file (Errcode: 17 "File exists")
Aborted: file is not compressed
DROP TABLE t1,t2,t3;
DROP TABLE mysql_db1.t1;
diff --git a/mysql-test/r/mysql_upgrade.result b/mysql-test/r/mysql_upgrade.result
index 247c2b80d62..1bd2944f86e 100644
--- a/mysql-test/r/mysql_upgrade.result
+++ b/mysql-test/r/mysql_upgrade.result
@@ -7,6 +7,7 @@ mtr
mtr.global_suppressions OK
mtr.test_suppressions OK
mysql
+mysql.column_stats OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
@@ -16,12 +17,14 @@ mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
mysql.host OK
+mysql.index_stats OK
mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
+mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
@@ -44,6 +47,7 @@ mtr
mtr.global_suppressions OK
mtr.test_suppressions OK
mysql
+mysql.column_stats OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
@@ -53,12 +57,14 @@ mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
mysql.host OK
+mysql.index_stats OK
mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
+mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
@@ -81,6 +87,7 @@ mtr
mtr.global_suppressions OK
mtr.test_suppressions OK
mysql
+mysql.column_stats OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
@@ -90,12 +97,14 @@ mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
mysql.host OK
+mysql.index_stats OK
mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
+mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
@@ -121,6 +130,7 @@ mtr
mtr.global_suppressions OK
mtr.test_suppressions OK
mysql
+mysql.column_stats OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
@@ -130,12 +140,14 @@ mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
mysql.host OK
+mysql.index_stats OK
mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
+mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
@@ -164,6 +176,7 @@ mtr
mtr.global_suppressions OK
mtr.test_suppressions OK
mysql
+mysql.column_stats OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
@@ -173,12 +186,14 @@ mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
mysql.host OK
+mysql.index_stats OK
mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
+mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
@@ -210,6 +225,7 @@ mtr
mtr.global_suppressions OK
mtr.test_suppressions OK
mysql
+mysql.column_stats OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
@@ -219,12 +235,14 @@ mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
mysql.host OK
+mysql.index_stats OK
mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
+mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
@@ -259,6 +277,7 @@ mtr
mtr.global_suppressions OK
mtr.test_suppressions OK
mysql
+mysql.column_stats OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
@@ -268,12 +287,14 @@ mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
mysql.host OK
+mysql.index_stats OK
mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
+mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
diff --git a/mysql-test/r/mysql_upgrade_ssl.result b/mysql-test/r/mysql_upgrade_ssl.result
index a08e7c115cc..29a33a7e8ac 100644
--- a/mysql-test/r/mysql_upgrade_ssl.result
+++ b/mysql-test/r/mysql_upgrade_ssl.result
@@ -9,6 +9,7 @@ mtr
mtr.global_suppressions OK
mtr.test_suppressions OK
mysql
+mysql.column_stats OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
@@ -18,12 +19,14 @@ mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
mysql.host OK
+mysql.index_stats OK
mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
+mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
diff --git a/mysql-test/r/mysqlbinlog.result b/mysql-test/r/mysqlbinlog.result
index 33904dfd9bd..255b0679244 100644
--- a/mysql-test/r/mysqlbinlog.result
+++ b/mysql-test/r/mysqlbinlog.result
@@ -892,6 +892,7 @@ DROP DATABASE test1;
FLUSH LOGS;
show binlog events in 'master-bin.000002' from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000002 # Binlog_checkpoint # # master-bin.000002
master-bin.000002 # Query # # CREATE DATABASE test1
master-bin.000002 # Query # # use `test1`; CREATE TABLE t1(id int)
master-bin.000002 # Query # # DROP DATABASE test1
diff --git a/mysql-test/r/mysqlcheck.result b/mysql-test/r/mysqlcheck.result
index ab707ceef80..034913a72e6 100644
--- a/mysql-test/r/mysqlcheck.result
+++ b/mysql-test/r/mysqlcheck.result
@@ -3,6 +3,7 @@ drop view if exists v1;
drop database if exists client_test_db;
mtr.global_suppressions OK
mtr.test_suppressions OK
+mysql.column_stats OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
@@ -12,12 +13,14 @@ mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
mysql.host OK
+mysql.index_stats OK
mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
+mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
@@ -27,6 +30,7 @@ mysql.time_zone_transition_type OK
mysql.user OK
mtr.global_suppressions Table is already up to date
mtr.test_suppressions Table is already up to date
+mysql.column_stats OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
@@ -36,12 +40,14 @@ mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
mysql.host OK
+mysql.index_stats OK
mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
+mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
@@ -49,6 +55,7 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.user OK
+mysql.column_stats OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
@@ -58,12 +65,14 @@ mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
mysql.host OK
+mysql.index_stats OK
mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
+mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
@@ -71,6 +80,7 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.user OK
+mysql.column_stats Table is already up to date
mysql.columns_priv Table is already up to date
mysql.db Table is already up to date
mysql.event Table is already up to date
@@ -80,12 +90,14 @@ mysql.help_keyword Table is already up to date
mysql.help_relation Table is already up to date
mysql.help_topic Table is already up to date
mysql.host Table is already up to date
+mysql.index_stats Table is already up to date
mysql.ndb_binlog_index Table is already up to date
mysql.plugin Table is already up to date
mysql.proc Table is already up to date
mysql.procs_priv Table is already up to date
mysql.proxies_priv Table is already up to date
mysql.servers Table is already up to date
+mysql.table_stats Table is already up to date
mysql.tables_priv Table is already up to date
mysql.time_zone Table is already up to date
mysql.time_zone_leap_second Table is already up to date
diff --git a/mysql-test/r/mysqld--help.result b/mysql-test/r/mysqld--help.result
index c36cc96bb1a..cff5b49191f 100644
--- a/mysql-test/r/mysqld--help.result
+++ b/mysql-test/r/mysqld--help.result
@@ -361,8 +361,7 @@ The following options may be given as the first argument:
--max-binlog-cache-size=#
Sets the total size of the transactional cache
--max-binlog-size=# Binary log will be rotated automatically when the size
- exceeds this value. Will also apply to relay logs if
- max_relay_log_size is 0
+ exceeds this value.
--max-binlog-stmt-cache-size=#
Sets the total size of the statement cache
--max-connect-errors=#
@@ -388,9 +387,9 @@ The following options may be given as the first argument:
--max-prepared-stmt-count=#
Maximum number of prepared statements in the server
--max-relay-log-size=#
- If non-zero: relay log will be rotated automatically when
- the size exceeds this value; if zero: when the size
- exceeds max_binlog_size
+ relay log will be rotated automatically when the size
+ exceeds this value. If 0 at startup, it's set to
+ max_binlog_size
--max-seeks-for-key=#
Limit assumed max number of seeks when looking up rows
based on a key
@@ -745,7 +744,7 @@ The following options may be given as the first argument:
The maximum packet length to sent successfully from the
master to slave.
--slave-net-timeout=#
- Number of seconds to wait for more data from a
+ Number of seconds to wait for more data from any
master/slave connection before aborting the read
--slave-skip-errors=name
Tells the slave thread to continue replication when a
@@ -843,6 +842,9 @@ The following options may be given as the first argument:
Prohibit update of a VIEW, which does not contain a key
of the underlying table and the query uses a LIMIT clause
(usually get from GUI tools)
+ --use-stat-tables=name
+ Specifies how to use system statistics tables. Possible
+ values are NEVER, COMPLEMENTARY, PREFERABLY
-u, --user=name Run mysqld daemon as user.
--userstat Enables statistics gathering for USER_STATISTICS,
CLIENT_STATISTICS, INDEX_STATISTICS and TABLE_STATISTICS
@@ -970,7 +972,7 @@ max-join-size 18446744073709551615
max-length-for-sort-data 1024
max-long-data-size 1048576
max-prepared-stmt-count 16382
-max-relay-log-size 0
+max-relay-log-size 1073741824
max-seeks-for-key 18446744073709551615
max-sort-length 1024
max-sp-recursion-depth 0
@@ -1096,6 +1098,7 @@ transaction-alloc-block-size 8192
transaction-isolation REPEATABLE-READ
transaction-prealloc-size 4096
updatable-views-with-limit YES
+use-stat-tables NEVER
userstat FALSE
verbose TRUE
wait-timeout 28800
diff --git a/mysql-test/r/mysqldump-max.result b/mysql-test/r/mysqldump-max.result
index 6722f308358..fb0be20c119 100644
--- a/mysql-test/r/mysqldump-max.result
+++ b/mysql-test/r/mysqldump-max.result
@@ -332,12 +332,12 @@ a b
2 1
DROP TABLE t1;
DROP TABLE t2;
-SHOW BINLOG EVENTS LIMIT 6,3;
+SHOW BINLOG EVENTS LIMIT 7,3;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 663 Query 1 731 BEGIN
-master-bin.000001 731 Query 1 828 use `test`; INSERT INTO t2 VALUES (1,0), (2,0)
-master-bin.000001 828 Xid 1 855 COMMIT /* XID */
--- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=855;
+master-bin.000001 704 Query 1 772 BEGIN
+master-bin.000001 772 Query 1 869 use `test`; INSERT INTO t2 VALUES (1,0), (2,0)
+master-bin.000001 869 Xid 1 896 COMMIT /* XID */
+-- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=896;
SELECT * FROM t1 ORDER BY a;
a
1
diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result
index 7b650addd3d..ef8bcea3649 100644
--- a/mysql-test/r/mysqldump.result
+++ b/mysql-test/r/mysqldump.result
@@ -5239,7 +5239,7 @@ Error 1146 Table 'mysql.event' doesn't exist
SHOW CREATE TABLE mysql.general_log;
Table Create Table
general_log CREATE TABLE `general_log` (
- `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
`thread_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
@@ -5249,7 +5249,7 @@ general_log CREATE TABLE `general_log` (
SHOW CREATE TABLE mysql.slow_log;
Table Create Table
slow_log CREATE TABLE `slow_log` (
- `start_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `start_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
`query_time` time(6) NOT NULL,
`lock_time` time(6) NOT NULL,
diff --git a/mysql-test/r/mysqltest.result b/mysql-test/r/mysqltest.result
index a3043aed711..811861a6d93 100644
--- a/mysql-test/r/mysqltest.result
+++ b/mysql-test/r/mysqltest.result
@@ -232,7 +232,7 @@ mysqltest: At line 2: Spurious text after `query` expression
mysqltest: At line 1: Missing argument(s) to 'error'
mysqltest: At line 1: Missing argument(s) to 'error'
mysqltest: At line 1: The sqlstate definition must start with an uppercase S
-mysqltest: At line 1: The error name definition must start with an uppercase E
+mysqltest: At line 1: The error name definition must start with an uppercase E or W
mysqltest: At line 1: Invalid argument to error: '9eeeee' - the errno may only consist of digits[0-9]
mysqltest: At line 1: Invalid argument to error: '1sssss' - the errno may only consist of digits[0-9]
mysqltest: At line 1: The sqlstate must be exactly 5 chars long
diff --git a/mysql-test/r/not_openssl.require b/mysql-test/r/not_openssl.require
deleted file mode 100644
index 2b5e423999c..00000000000
--- a/mysql-test/r/not_openssl.require
+++ /dev/null
@@ -1,2 +0,0 @@
-Variable_name Value
-have_openssl NO
diff --git a/mysql-test/r/order_by.result b/mysql-test/r/order_by.result
index 94e7d5e757a..054dc9e4fc4 100644
--- a/mysql-test/r/order_by.result
+++ b/mysql-test/r/order_by.result
@@ -1439,6 +1439,7 @@ CALL mtr.add_suppression("Out of sort memory");
select * from t1 order by b;
ERROR HY001: Out of sort memory, consider increasing server sort buffer size
drop table t1;
+set session sort_buffer_size= 30000;
#
# Bug #39844: Query Crash Mysql Server 5.0.67
#
@@ -1533,6 +1534,890 @@ ppfcz1 DE ppfcz1 14 8 57.5
ppfcz1 DE ppfcz1 14 9 59.5
ppfcz1 DE ppfcz1 14 10 61.5
DROP TABLE t1,t2,t3;
+#
+# WL#1393 - Optimizing filesort with small limit
+#
+CREATE TABLE t1(f0 int auto_increment primary key, f1 int, f2 varchar(200));
+INSERT INTO t1(f1, f2) VALUES
+(0,"0"),(1,"1"),(2,"2"),(3,"3"),(4,"4"),(5,"5"),
+(6,"6"),(7,"7"),(8,"8"),(9,"9"),(10,"10"),
+(11,"11"),(12,"12"),(13,"13"),(14,"14"),(15,"15"),
+(16,"16"),(17,"17"),(18,"18"),(19,"19"),(20,"20"),
+(21,"21"),(22,"22"),(23,"23"),(24,"24"),(25,"25"),
+(26,"26"),(27,"27"),(28,"28"),(29,"29"),(30,"30"),
+(31,"31"),(32,"32"),(33,"33"),(34,"34"),(35,"35"),
+(36,"36"),(37,"37"),(38,"38"),(39,"39"),(40,"40"),
+(41,"41"),(42,"42"),(43,"43"),(44,"44"),(45,"45"),
+(46,"46"),(47,"47"),(48,"48"),(49,"49"),(50,"50"),
+(51,"51"),(52,"52"),(53,"53"),(54,"54"),(55,"55"),
+(56,"56"),(57,"57"),(58,"58"),(59,"59"),(60,"60"),
+(61,"61"),(62,"62"),(63,"63"),(64,"64"),(65,"65"),
+(66,"66"),(67,"67"),(68,"68"),(69,"69"),(70,"70"),
+(71,"71"),(72,"72"),(73,"73"),(74,"74"),(75,"75"),
+(76,"76"),(77,"77"),(78,"78"),(79,"79"),(80,"80"),
+(81,"81"),(82,"82"),(83,"83"),(84,"84"),(85,"85"),
+(86,"86"),(87,"87"),(88,"88"),(89,"89"),(90,"90"),
+(91,"91"),(92,"92"),(93,"93"),(94,"94"),(95,"95"),
+(96,"96"),(97,"97"),(98,"98"),(99,"99");
+SELECT * FROM t1 ORDER BY f1 ASC, f0 LIMIT 100;
+f0 f1 f2
+1 0 0
+2 1 1
+3 2 2
+4 3 3
+5 4 4
+6 5 5
+7 6 6
+8 7 7
+9 8 8
+10 9 9
+11 10 10
+12 11 11
+13 12 12
+14 13 13
+15 14 14
+16 15 15
+17 16 16
+18 17 17
+19 18 18
+20 19 19
+21 20 20
+22 21 21
+23 22 22
+24 23 23
+25 24 24
+26 25 25
+27 26 26
+28 27 27
+29 28 28
+30 29 29
+31 30 30
+32 31 31
+33 32 32
+34 33 33
+35 34 34
+36 35 35
+37 36 36
+38 37 37
+39 38 38
+40 39 39
+41 40 40
+42 41 41
+43 42 42
+44 43 43
+45 44 44
+46 45 45
+47 46 46
+48 47 47
+49 48 48
+50 49 49
+51 50 50
+52 51 51
+53 52 52
+54 53 53
+55 54 54
+56 55 55
+57 56 56
+58 57 57
+59 58 58
+60 59 59
+61 60 60
+62 61 61
+63 62 62
+64 63 63
+65 64 64
+66 65 65
+67 66 66
+68 67 67
+69 68 68
+70 69 69
+71 70 70
+72 71 71
+73 72 72
+74 73 73
+75 74 74
+76 75 75
+77 76 76
+78 77 77
+79 78 78
+80 79 79
+81 80 80
+82 81 81
+83 82 82
+84 83 83
+85 84 84
+86 85 85
+87 86 86
+88 87 87
+89 88 88
+90 89 89
+91 90 90
+92 91 91
+93 92 92
+94 93 93
+95 94 94
+96 95 95
+97 96 96
+98 97 97
+99 98 98
+100 99 99
+SELECT * FROM t1 ORDER BY f1 ASC, f0 LIMIT 30;
+f0 f1 f2
+1 0 0
+2 1 1
+3 2 2
+4 3 3
+5 4 4
+6 5 5
+7 6 6
+8 7 7
+9 8 8
+10 9 9
+11 10 10
+12 11 11
+13 12 12
+14 13 13
+15 14 14
+16 15 15
+17 16 16
+18 17 17
+19 18 18
+20 19 19
+21 20 20
+22 21 21
+23 22 22
+24 23 23
+25 24 24
+26 25 25
+27 26 26
+28 27 27
+29 28 28
+30 29 29
+SELECT * FROM t1 ORDER BY f1 ASC, f0 LIMIT 0;
+f0 f1 f2
+SELECT * FROM t1 ORDER BY f2 DESC, f0 LIMIT 30;
+f0 f1 f2
+100 99 99
+99 98 98
+98 97 97
+97 96 96
+96 95 95
+95 94 94
+94 93 93
+93 92 92
+92 91 91
+91 90 90
+10 9 9
+90 89 89
+89 88 88
+88 87 87
+87 86 86
+86 85 85
+85 84 84
+84 83 83
+83 82 82
+82 81 81
+81 80 80
+9 8 8
+80 79 79
+79 78 78
+78 77 77
+77 76 76
+76 75 75
+75 74 74
+74 73 73
+73 72 72
+SELECT * FROM t1 ORDER BY f2 DESC, f0 LIMIT 0;
+f0 f1 f2
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 20;
+f0 f1 f2
+12 11 11
+13 12 12
+14 13 13
+15 14 14
+16 15 15
+17 16 16
+18 17 17
+19 18 18
+20 19 19
+21 20 20
+22 21 21
+23 22 22
+24 23 23
+25 24 24
+26 25 25
+27 26 26
+28 27 27
+29 28 28
+30 29 29
+31 30 30
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 0;
+f0 f1 f2
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 10 OFFSET 10;
+f0 f1 f2
+22 21 21
+23 22 22
+24 23 23
+25 24 24
+26 25 25
+27 26 26
+28 27 27
+29 28 28
+30 29 29
+31 30 30
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 0 OFFSET 10;
+f0 f1 f2
+set sort_buffer_size= 32768;
+CREATE TEMPORARY TABLE tmp (f1 int, f2 varchar(20));
+INSERT INTO tmp SELECT f1, f2 FROM t1;
+INSERT INTO t1(f1, f2) SELECT * FROM tmp;
+INSERT INTO tmp SELECT f1, f2 FROM t1;
+INSERT INTO t1(f1, f2) SELECT * FROM tmp;
+SELECT * FROM t1 ORDER BY f1 ASC, f0 LIMIT 30;
+f0 f1 f2
+1 0 0
+101 0 0
+201 0 0
+301 0 0
+401 0 0
+2 1 1
+102 1 1
+202 1 1
+302 1 1
+402 1 1
+3 2 2
+103 2 2
+203 2 2
+303 2 2
+403 2 2
+4 3 3
+104 3 3
+204 3 3
+304 3 3
+404 3 3
+5 4 4
+105 4 4
+205 4 4
+305 4 4
+405 4 4
+6 5 5
+106 5 5
+206 5 5
+306 5 5
+406 5 5
+SELECT * FROM t1 ORDER BY f1 ASC, f0 LIMIT 0;
+f0 f1 f2
+SELECT * FROM t1 ORDER BY f2 DESC, f0 LIMIT 30;
+f0 f1 f2
+100 99 99
+200 99 99
+300 99 99
+400 99 99
+500 99 99
+99 98 98
+199 98 98
+299 98 98
+399 98 98
+499 98 98
+98 97 97
+198 97 97
+298 97 97
+398 97 97
+498 97 97
+97 96 96
+197 96 96
+297 96 96
+397 96 96
+497 96 96
+96 95 95
+196 95 95
+296 95 95
+396 95 95
+496 95 95
+95 94 94
+195 94 94
+295 94 94
+395 94 94
+495 94 94
+SELECT * FROM t1 ORDER BY f2 DESC, f0 LIMIT 0;
+f0 f1 f2
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 20;
+f0 f1 f2
+12 11 11
+112 11 11
+212 11 11
+312 11 11
+412 11 11
+13 12 12
+113 12 12
+213 12 12
+313 12 12
+413 12 12
+14 13 13
+114 13 13
+214 13 13
+314 13 13
+414 13 13
+15 14 14
+115 14 14
+215 14 14
+315 14 14
+415 14 14
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 0;
+f0 f1 f2
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 10 OFFSET 10;
+f0 f1 f2
+14 13 13
+114 13 13
+214 13 13
+314 13 13
+414 13 13
+15 14 14
+115 14 14
+215 14 14
+315 14 14
+415 14 14
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 0 OFFSET 10;
+f0 f1 f2
+set sort_buffer_size= 32768;
+SELECT SQL_CALC_FOUND_ROWS * FROM t1
+ORDER BY f1, f0 LIMIT 30;
+f0 f1 f2
+1 0 0
+101 0 0
+201 0 0
+301 0 0
+401 0 0
+2 1 1
+102 1 1
+202 1 1
+302 1 1
+402 1 1
+3 2 2
+103 2 2
+203 2 2
+303 2 2
+403 2 2
+4 3 3
+104 3 3
+204 3 3
+304 3 3
+404 3 3
+5 4 4
+105 4 4
+205 4 4
+305 4 4
+405 4 4
+6 5 5
+106 5 5
+206 5 5
+306 5 5
+406 5 5
+SELECT FOUND_ROWS();
+FOUND_ROWS()
+500
+SELECT SQL_CALC_FOUND_ROWS * FROM t1
+ORDER BY f1, f0 LIMIT 0;
+f0 f1 f2
+SELECT FOUND_ROWS();
+FOUND_ROWS()
+500
+SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE f1>10
+ORDER BY f2, f0 LIMIT 20;
+f0 f1 f2
+12 11 11
+112 11 11
+212 11 11
+312 11 11
+412 11 11
+13 12 12
+113 12 12
+213 12 12
+313 12 12
+413 12 12
+14 13 13
+114 13 13
+214 13 13
+314 13 13
+414 13 13
+15 14 14
+115 14 14
+215 14 14
+315 14 14
+415 14 14
+SELECT FOUND_ROWS();
+FOUND_ROWS()
+445
+SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE f1>10
+ORDER BY f2, f0 LIMIT 0;
+f0 f1 f2
+SELECT FOUND_ROWS();
+FOUND_ROWS()
+445
+SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE f1>10
+ORDER BY f2, f0 LIMIT 10 OFFSET 10;
+f0 f1 f2
+14 13 13
+114 13 13
+214 13 13
+314 13 13
+414 13 13
+15 14 14
+115 14 14
+215 14 14
+315 14 14
+415 14 14
+SELECT FOUND_ROWS();
+FOUND_ROWS()
+445
+SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE f1>10
+ORDER BY f2, f0 LIMIT 0 OFFSET 10;
+f0 f1 f2
+SELECT FOUND_ROWS();
+FOUND_ROWS()
+445
+set sort_buffer_size= 327680;
+SELECT * FROM t1 JOIN tmp on t1.f2=tmp.f2
+ORDER BY tmp.f1, f0 LIMIT 30;
+f0 f1 f2 f1 f2
+1 0 0 0 0
+1 0 0 0 0
+1 0 0 0 0
+101 0 0 0 0
+101 0 0 0 0
+101 0 0 0 0
+201 0 0 0 0
+201 0 0 0 0
+201 0 0 0 0
+301 0 0 0 0
+301 0 0 0 0
+301 0 0 0 0
+401 0 0 0 0
+401 0 0 0 0
+401 0 0 0 0
+2 1 1 1 1
+2 1 1 1 1
+2 1 1 1 1
+102 1 1 1 1
+102 1 1 1 1
+102 1 1 1 1
+202 1 1 1 1
+202 1 1 1 1
+202 1 1 1 1
+302 1 1 1 1
+302 1 1 1 1
+302 1 1 1 1
+402 1 1 1 1
+402 1 1 1 1
+402 1 1 1 1
+SELECT * FROM t1 JOIN tmp on t1.f2=tmp.f2
+ORDER BY tmp.f1, f0 LIMIT 30 OFFSET 30;
+f0 f1 f2 f1 f2
+3 2 2 2 2
+3 2 2 2 2
+3 2 2 2 2
+103 2 2 2 2
+103 2 2 2 2
+103 2 2 2 2
+203 2 2 2 2
+203 2 2 2 2
+203 2 2 2 2
+303 2 2 2 2
+303 2 2 2 2
+303 2 2 2 2
+403 2 2 2 2
+403 2 2 2 2
+403 2 2 2 2
+4 3 3 3 3
+4 3 3 3 3
+4 3 3 3 3
+104 3 3 3 3
+104 3 3 3 3
+104 3 3 3 3
+204 3 3 3 3
+204 3 3 3 3
+204 3 3 3 3
+304 3 3 3 3
+304 3 3 3 3
+304 3 3 3 3
+404 3 3 3 3
+404 3 3 3 3
+404 3 3 3 3
+SELECT SQL_CALC_FOUND_ROWS * FROM t1 JOIN tmp on t1.f2=tmp.f2
+ORDER BY tmp.f1, f0 LIMIT 30 OFFSET 30;
+f0 f1 f2 f1 f2
+3 2 2 2 2
+3 2 2 2 2
+3 2 2 2 2
+103 2 2 2 2
+103 2 2 2 2
+103 2 2 2 2
+203 2 2 2 2
+203 2 2 2 2
+203 2 2 2 2
+303 2 2 2 2
+303 2 2 2 2
+303 2 2 2 2
+403 2 2 2 2
+403 2 2 2 2
+403 2 2 2 2
+4 3 3 3 3
+4 3 3 3 3
+4 3 3 3 3
+104 3 3 3 3
+104 3 3 3 3
+104 3 3 3 3
+204 3 3 3 3
+204 3 3 3 3
+204 3 3 3 3
+304 3 3 3 3
+304 3 3 3 3
+304 3 3 3 3
+404 3 3 3 3
+404 3 3 3 3
+404 3 3 3 3
+SELECT FOUND_ROWS();
+FOUND_ROWS()
+1500
+SELECT SQL_CALC_FOUND_ROWS * FROM t1 JOIN tmp on t1.f2=tmp.f2
+WHERE t1.f2>20
+ORDER BY tmp.f1, f0 LIMIT 30 OFFSET 30;
+f0 f1 f2 f1 f2
+24 23 23 23 23
+24 23 23 23 23
+24 23 23 23 23
+124 23 23 23 23
+124 23 23 23 23
+124 23 23 23 23
+224 23 23 23 23
+224 23 23 23 23
+224 23 23 23 23
+324 23 23 23 23
+324 23 23 23 23
+324 23 23 23 23
+424 23 23 23 23
+424 23 23 23 23
+424 23 23 23 23
+25 24 24 24 24
+25 24 24 24 24
+25 24 24 24 24
+125 24 24 24 24
+125 24 24 24 24
+125 24 24 24 24
+225 24 24 24 24
+225 24 24 24 24
+225 24 24 24 24
+325 24 24 24 24
+325 24 24 24 24
+325 24 24 24 24
+425 24 24 24 24
+425 24 24 24 24
+425 24 24 24 24
+SELECT FOUND_ROWS();
+FOUND_ROWS()
+1185
+CREATE VIEW v1 as SELECT * FROM t1 ORDER BY f1, f0 LIMIT 30;
+SELECT * FROM v1;
+f0 f1 f2
+1 0 0
+101 0 0
+201 0 0
+301 0 0
+401 0 0
+2 1 1
+102 1 1
+202 1 1
+302 1 1
+402 1 1
+3 2 2
+103 2 2
+203 2 2
+303 2 2
+403 2 2
+4 3 3
+104 3 3
+204 3 3
+304 3 3
+404 3 3
+5 4 4
+105 4 4
+205 4 4
+305 4 4
+405 4 4
+6 5 5
+106 5 5
+206 5 5
+306 5 5
+406 5 5
+drop view v1;
+CREATE VIEW v1 as SELECT * FROM t1 ORDER BY f1, f0 LIMIT 100;
+SELECT * FROM v1 ORDER BY f2, f0 LIMIT 30;
+f0 f1 f2
+1 0 0
+101 0 0
+201 0 0
+301 0 0
+401 0 0
+2 1 1
+102 1 1
+202 1 1
+302 1 1
+402 1 1
+11 10 10
+111 10 10
+211 10 10
+311 10 10
+411 10 10
+12 11 11
+112 11 11
+212 11 11
+312 11 11
+412 11 11
+13 12 12
+113 12 12
+213 12 12
+313 12 12
+413 12 12
+14 13 13
+114 13 13
+214 13 13
+314 13 13
+414 13 13
+CREATE VIEW v2 as SELECT * FROM t1 ORDER BY f2, f0 LIMIT 100;
+SELECT * FROM v1 JOIN v2 on v1.f1=v2.f1 ORDER BY v1.f2,v1.f0,v2.f0
+LIMIT 30;
+f0 f1 f2 f0 f1 f2
+1 0 0 1 0 0
+1 0 0 101 0 0
+1 0 0 201 0 0
+1 0 0 301 0 0
+1 0 0 401 0 0
+101 0 0 1 0 0
+101 0 0 101 0 0
+101 0 0 201 0 0
+101 0 0 301 0 0
+101 0 0 401 0 0
+201 0 0 1 0 0
+201 0 0 101 0 0
+201 0 0 201 0 0
+201 0 0 301 0 0
+201 0 0 401 0 0
+301 0 0 1 0 0
+301 0 0 101 0 0
+301 0 0 201 0 0
+301 0 0 301 0 0
+301 0 0 401 0 0
+401 0 0 1 0 0
+401 0 0 101 0 0
+401 0 0 201 0 0
+401 0 0 301 0 0
+401 0 0 401 0 0
+2 1 1 2 1 1
+2 1 1 102 1 1
+2 1 1 202 1 1
+2 1 1 302 1 1
+2 1 1 402 1 1
+SELECT floor(f1/10) f3, count(f2) FROM t1
+GROUP BY 1 ORDER BY 2,1 LIMIT 5;
+f3 count(f2)
+0 50
+1 50
+2 50
+3 50
+4 50
+SELECT floor(f1/10) f3, count(f2) FROM t1
+GROUP BY 1 ORDER BY 2,1 LIMIT 0;
+f3 count(f2)
+CREATE PROCEDURE wl1393_sp_test()
+BEGIN
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 30;
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 15 OFFSET 15;
+SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE f1>10
+ORDER BY f2, f0 LIMIT 15 OFFSET 15;
+SELECT FOUND_ROWS();
+SELECT * FROM v1 ORDER BY f2, f0 LIMIT 30;
+END|
+CALL wl1393_sp_test()|
+f0 f1 f2
+12 11 11
+112 11 11
+212 11 11
+312 11 11
+412 11 11
+13 12 12
+113 12 12
+213 12 12
+313 12 12
+413 12 12
+14 13 13
+114 13 13
+214 13 13
+314 13 13
+414 13 13
+15 14 14
+115 14 14
+215 14 14
+315 14 14
+415 14 14
+16 15 15
+116 15 15
+216 15 15
+316 15 15
+416 15 15
+17 16 16
+117 16 16
+217 16 16
+317 16 16
+417 16 16
+f0 f1 f2
+15 14 14
+115 14 14
+215 14 14
+315 14 14
+415 14 14
+16 15 15
+116 15 15
+216 15 15
+316 15 15
+416 15 15
+17 16 16
+117 16 16
+217 16 16
+317 16 16
+417 16 16
+f0 f1 f2
+15 14 14
+115 14 14
+215 14 14
+315 14 14
+415 14 14
+16 15 15
+116 15 15
+216 15 15
+316 15 15
+416 15 15
+17 16 16
+117 16 16
+217 16 16
+317 16 16
+417 16 16
+FOUND_ROWS()
+445
+f0 f1 f2
+1 0 0
+101 0 0
+201 0 0
+301 0 0
+401 0 0
+2 1 1
+102 1 1
+202 1 1
+302 1 1
+402 1 1
+11 10 10
+111 10 10
+211 10 10
+311 10 10
+411 10 10
+12 11 11
+112 11 11
+212 11 11
+312 11 11
+412 11 11
+13 12 12
+113 12 12
+213 12 12
+313 12 12
+413 12 12
+14 13 13
+114 13 13
+214 13 13
+314 13 13
+414 13 13
+DROP PROCEDURE wl1393_sp_test|
+SELECT d1.f1, d1.f2 FROM t1
+LEFT JOIN (SELECT * FROM t1 ORDER BY f1 LIMIT 30) d1 on t1.f1=d1.f1
+ORDER BY d1.f2 DESC LIMIT 30;
+f1 f2
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+5 5
+4 4
+4 4
+4 4
+4 4
+4 4
+SELECT * FROM t1 WHERE f1 = (SELECT f1 FROM t1 ORDER BY 1 LIMIT 1);
+f0 f1 f2
+1 0 0
+101 0 0
+201 0 0
+301 0 0
+401 0 0
+SELECT * FROM t1 WHERE f1 = (SELECT f1 FROM t1 ORDER BY 1 LIMIT 2);
+ERROR 21000: Subquery returns more than 1 row
+DROP TABLE t1, tmp;
+DROP VIEW v1, v2;
+# end of WL#1393 - Optimizing filesort with small limit
+#
+# Bug #58761
+# Crash in Field::is_null in field.h on subquery in WHERE clause
+#
+CREATE TABLE t1 (
+pk INT NOT NULL AUTO_INCREMENT,
+col_int_key INT DEFAULT NULL,
+col_varchar_key VARCHAR(1) DEFAULT NULL,
+PRIMARY KEY (pk),
+KEY col_varchar_key (col_varchar_key,col_int_key)
+);
+INSERT INTO t1 VALUES (27,7,'x');
+INSERT INTO t1 VALUES (28,6,'m');
+INSERT INTO t1 VALUES (29,4,'c');
+CREATE TABLE where_subselect
+SELECT DISTINCT `pk` AS field1 , `pk` AS field2
+FROM t1 AS alias1
+WHERE alias1 . `col_int_key` > 229
+OR alias1 . `col_varchar_key` IS NOT NULL
+GROUP BY field1, field2
+;
+SELECT *
+FROM where_subselect
+WHERE (field1, field2) IN (
+SELECT DISTINCT `pk` AS field1 , `pk` AS field2
+FROM t1 AS alias1
+WHERE alias1 . `col_int_key` > 229
+OR alias1 . `col_varchar_key` IS NOT NULL
+GROUP BY field1, field2
+);
+field1 field2
+27 27
+28 28
+29 29
+DROP TABLE t1;
+DROP TABLE where_subselect;
+# End of Bug #58761
CREATE TABLE t1 (
id1 INT NULL,
id2 INT NOT NULL,
diff --git a/mysql-test/r/order_by_sortkey.result b/mysql-test/r/order_by_sortkey.result
new file mode 100644
index 00000000000..717780f0af2
--- /dev/null
+++ b/mysql-test/r/order_by_sortkey.result
@@ -0,0 +1,159 @@
+CREATE TABLE t1(
+f0 int auto_increment PRIMARY KEY,
+f1 int,
+f2 varchar(200)
+);
+INSERT INTO t1(f1, f2) VALUES
+(0,"0"),(1,"1"),(2,"2"),(3,"3"),(4,"4"),(5,"5"),
+(6,"6"),(7,"7"),(8,"8"),(9,"9"),(10,"10"),
+(11,"11"),(12,"12"),(13,"13"),(14,"14"),(15,"15"),
+(16,"16"),(17,"17"),(18,"18"),(19,"19"),(20,"20"),
+(21,"21"),(22,"22"),(23,"23"),(24,"24"),(25,"25"),
+(26,"26"),(27,"27"),(28,"28"),(29,"29"),(30,"30"),
+(31,"31"),(32,"32"),(33,"33"),(34,"34"),(35,"35"),
+(36,"36"),(37,"37"),(38,"38"),(39,"39"),(40,"40"),
+(41,"41"),(42,"42"),(43,"43"),(44,"44"),(45,"45"),
+(46,"46"),(47,"47"),(48,"48"),(49,"49"),(50,"50"),
+(51,"51"),(52,"52"),(53,"53"),(54,"54"),(55,"55"),
+(56,"56"),(57,"57"),(58,"58"),(59,"59"),(60,"60"),
+(61,"61"),(62,"62"),(63,"63"),(64,"64"),(65,"65"),
+(66,"66"),(67,"67"),(68,"68"),(69,"69"),(70,"70"),
+(71,"71"),(72,"72"),(73,"73"),(74,"74"),(75,"75"),
+(76,"76"),(77,"77"),(78,"78"),(79,"79"),(80,"80"),
+(81,"81"),(82,"82"),(83,"83"),(84,"84"),(85,"85"),
+(86,"86"),(87,"87"),(88,"88"),(89,"89"),(90,"90"),
+(91,"91"),(92,"92"),(93,"93"),(94,"94"),(95,"95"),
+(96,"96"),(97,"97"),(98,"98"),(99,"99");
+CREATE TEMPORARY TABLE tmp (f1 int, f2 varchar(20));
+INSERT INTO tmp SELECT f1,f2 FROM t1;
+INSERT INTO t1(f1,f2) SELECT * FROM tmp;
+INSERT INTO tmp SELECT f1,f2 FROM t1;
+INSERT INTO t1(f1,f2) SELECT * FROM tmp;
+INSERT INTO t1(f1,f2) SELECT * FROM tmp;
+INSERT INTO tmp SELECT f1,f2 FROM t1;
+INSERT INTO t1(f1,f2) SELECT * FROM tmp;
+INSERT INTO tmp SELECT f1,f2 FROM t1;
+INSERT INTO t1(f1,f2) SELECT * FROM tmp;
+INSERT INTO tmp SELECT f1,f2 FROM t1;
+INSERT INTO t1(f1,f2) SELECT * FROM tmp;
+INSERT INTO tmp SELECT f1,f2 FROM t1;
+INSERT INTO t1(f1,f2) SELECT * FROM tmp;
+INSERT INTO tmp SELECT f1,f2 FROM t1;
+INSERT INTO t1(f1,f2) SELECT * FROM tmp;
+set sort_buffer_size= 32768;
+FLUSH STATUS;
+SHOW SESSION STATUS LIKE 'Sort%';
+Variable_name Value
+Sort_merge_passes 0
+Sort_range 0
+Sort_rows 0
+Sort_scan 0
+SELECT * FROM t1 ORDER BY f2 LIMIT 100;
+f0 f1 f2
+1 0 0
+101 0 0
+201 0 0
+301 0 0
+401 0 0
+501 0 0
+601 0 0
+701 0 0
+801 0 0
+901 0 0
+1001 0 0
+1101 0 0
+1201 0 0
+1301 0 0
+1401 0 0
+1501 0 0
+1601 0 0
+1701 0 0
+1801 0 0
+1901 0 0
+2001 0 0
+2101 0 0
+2201 0 0
+2301 0 0
+2401 0 0
+2501 0 0
+2601 0 0
+2701 0 0
+2801 0 0
+2901 0 0
+3001 0 0
+3101 0 0
+3201 0 0
+3301 0 0
+3401 0 0
+3501 0 0
+3601 0 0
+3701 0 0
+3801 0 0
+3901 0 0
+4001 0 0
+4101 0 0
+4201 0 0
+4301 0 0
+4401 0 0
+4501 0 0
+4601 0 0
+4701 0 0
+4801 0 0
+4901 0 0
+5001 0 0
+5101 0 0
+5201 0 0
+5301 0 0
+5401 0 0
+5501 0 0
+5601 0 0
+5701 0 0
+5801 0 0
+5901 0 0
+6001 0 0
+6101 0 0
+6201 0 0
+6301 0 0
+6401 0 0
+6501 0 0
+6601 0 0
+6701 0 0
+6801 0 0
+6901 0 0
+7001 0 0
+7101 0 0
+7201 0 0
+7301 0 0
+7401 0 0
+7501 0 0
+7601 0 0
+7701 0 0
+7801 0 0
+7901 0 0
+8001 0 0
+8101 0 0
+8201 0 0
+8301 0 0
+8401 0 0
+8501 0 0
+8601 0 0
+8701 0 0
+8801 0 0
+8901 0 0
+9001 0 0
+9101 0 0
+9201 0 0
+9301 0 0
+9401 0 0
+9501 0 0
+9601 0 0
+9701 0 0
+9801 0 0
+9901 0 0
+SHOW SESSION STATUS LIKE 'Sort%';
+Variable_name Value
+Sort_merge_passes 0
+Sort_range 0
+Sort_rows 100
+Sort_scan 1
+DROP TABLE t1, tmp;
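The new order_by_sortkey.result above captures the observable effect of the small-LIMIT filesort optimization (WL#1393, referenced in the order_by.result hunk earlier): with a deliberately tiny sort_buffer_size, the ORDER BY ... LIMIT 100 query still completes with Sort_merge_passes = 0, because the sorter only needs to retain the top 100 keys instead of merge-sorting every row. A minimal sketch of the same observation, with table and column names taken from the test (exact row counts will differ on other data):

SET SESSION sort_buffer_size = 32768;
FLUSH STATUS;
SELECT * FROM t1 ORDER BY f2 LIMIT 100;
# With the optimization active, expect Sort_merge_passes = 0 and Sort_rows = 100:
SHOW SESSION STATUS LIKE 'Sort%';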
diff --git a/mysql-test/r/parser.result b/mysql-test/r/parser.result
index 10700d0ba73..54378f16d49 100644
--- a/mysql-test/r/parser.result
+++ b/mysql-test/r/parser.result
@@ -441,7 +441,7 @@ select master_pos_wait();
ERROR 42000: Incorrect parameter count in the call to native function 'master_pos_wait'
select master_pos_wait(1);
ERROR 42000: Incorrect parameter count in the call to native function 'master_pos_wait'
-select master_pos_wait(1, 2, 3, 4);
+select master_pos_wait(1, 2, 3, 4, 5);
ERROR 42000: Incorrect parameter count in the call to native function 'master_pos_wait'
select rand(1, 2, 3);
ERROR 42000: Incorrect parameter count in the call to native function 'rand'
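The parser.result hunk is telling: a four-argument call to master_pos_wait() no longer raises the parameter-count error, so the test now probes with five arguments. That is consistent with master_pos_wait() gaining an optional fourth argument (in MariaDB, a connection name used with multi-source replication). A hedged sketch, with the argument roles assumed rather than taken from this diff:

# The documented file/position/timeout form still works:
SELECT master_pos_wait('master-bin.000001', 4, 10);
# A fourth argument now parses as well (assumed to name a master connection):
SELECT master_pos_wait('master-bin.000001', 4, 10, 'connection_name');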
diff --git a/mysql-test/r/partition_datatype.result b/mysql-test/r/partition_datatype.result
index e0658f71612..8d72d8781ee 100644
--- a/mysql-test/r/partition_datatype.result
+++ b/mysql-test/r/partition_datatype.result
@@ -315,11 +315,11 @@ drop table t1;
create table t1 (a varchar(3070)) partition by key (a);
ERROR HY000: The total length of the partitioning fields is too large
create table t1 (a varchar(65533)) partition by key (a);
-ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 65535. You have to change some columns to TEXT or BLOBs
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 65535. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
create table t1 (a varchar(65534) not null) partition by key (a);
-ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 65535. You have to change some columns to TEXT or BLOBs
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 65535. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
create table t1 (a varchar(65535)) partition by key (a);
-ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 65535. You have to change some columns to TEXT or BLOBs
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 65535. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
create table t1 (a bit(27), primary key (a)) engine=myisam
partition by hash (a)
(partition p0, partition p1, partition p2);
diff --git a/mysql-test/r/partition_innodb_plugin.result b/mysql-test/r/partition_innodb_plugin.result
index 21f18f715d0..fa2ef5c6c18 100644
--- a/mysql-test/r/partition_innodb_plugin.result
+++ b/mysql-test/r/partition_innodb_plugin.result
@@ -67,7 +67,7 @@ LOCK TABLE t1 WRITE;
# ALTER fails because COMPRESSED/KEY_BLOCK_SIZE
# are incompatible with innodb_file_per_table = OFF;
ALTER TABLE t1 ADD PARTITION PARTITIONS 1;
-ERROR HY000: Got error 1478 from storage engine
+ERROR HY000: Got error 140 "Wrong create options" from storage engine
t1#P#p0.ibd
t1.frm
t1.par
@@ -76,18 +76,18 @@ t1.par
SET innodb_strict_mode = OFF;
ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
Warnings:
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4.
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4.
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4.
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4.
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4.
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4.
t1.frm
t1.par
ALTER TABLE t1 REBUILD PARTITION p0;
Warnings:
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4.
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4.
UNLOCK TABLES;
SHOW CREATE TABLE t1;
Table Create Table
diff --git a/mysql-test/r/partition_open_files_limit.result b/mysql-test/r/partition_open_files_limit.result
index 1441ba4e78e..fed32a69c44 100644
--- a/mysql-test/r/partition_open_files_limit.result
+++ b/mysql-test/r/partition_open_files_limit.result
@@ -5,7 +5,7 @@ ENGINE=MyISAM PARTITION BY KEY () PARTITIONS 1;
INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11);
# if the bug exists, then crash will happen here
ALTER TABLE t1 ADD PARTITION PARTITIONS 511;
-ERROR HY000: Out of resources when opening file '<partition file>' (Errcode: 24)
+ERROR HY000: Out of resources when opening file '<partition file>' (Errcode: 24 "Too many open files")
SELECT * FROM t1;
a
1
diff --git a/mysql-test/r/ps_1general.result b/mysql-test/r/ps_1general.result
index 0a3d16cf48e..3ca0cf8554f 100644
--- a/mysql-test/r/ps_1general.result
+++ b/mysql-test/r/ps_1general.result
@@ -533,7 +533,7 @@ drop table t2;
prepare stmt1 from ' rename table t5 to t6, t7 to t8 ' ;
create table t5 (a int) ;
execute stmt1 ;
-ERROR HY000: Can't find file: './test/t7' (errno: 2)
+ERROR HY000: Can't find file: './test/t7' (errno: 2 "No such file or directory")
create table t7 (a int) ;
execute stmt1 ;
execute stmt1 ;
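A recurring pattern in these result updates (partition_open_files_limit.result and ps_1general.result above) is that operating-system error codes are now rendered with their strerror() text appended, e.g. (errno: 2 "No such file or directory") and (Errcode: 24 "Too many open files"). Only the message format changes; the failing statements and error numbers are the same. The test's own case, restated as a sketch:

# Renaming a table whose files are missing still fails with errno 2,
# but the message now spells out the OS meaning of the code:
RENAME TABLE t5 TO t6, t7 TO t8;
# ERROR HY000: Can't find file: './test/t7' (errno: 2 "No such file or directory")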
diff --git a/mysql-test/r/range_vs_index_merge.result b/mysql-test/r/range_vs_index_merge.result
index cc8a345a2ff..424b1009a97 100644
--- a/mysql-test/r/range_vs_index_merge.result
+++ b/mysql-test/r/range_vs_index_merge.result
@@ -1576,7 +1576,7 @@ update t1 set c1=lpad(id+1000, 12, ' '), c2=lpad(id+10000, 15, ' ');
alter table t1 add unique index (c1), add unique index (c2), add index (c3);
analyze table t1;
Table Op Msg_type Msg_text
-test.t1 analyze status OK
+test.t1 analyze status Table is already up to date
explain
select * from t1 where (c1=' 100000' or c2=' 2000000');
id select_type table type possible_keys key key_len ref rows Extra
diff --git a/mysql-test/r/row-checksum-old.result b/mysql-test/r/row-checksum-old.result
index ef523463860..87f0bb8af2d 100644
--- a/mysql-test/r/row-checksum-old.result
+++ b/mysql-test/r/row-checksum-old.result
@@ -73,7 +73,7 @@ test.t1 4108368782
drop table if exists t1;
create table t1 (a int null, v varchar(100)) engine=innodb checksum=0 row_format=fixed;
Warnings:
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
insert into t1 values(null, null), (1, "hello");
checksum table t1;
Table Checksum
diff --git a/mysql-test/r/row-checksum.result b/mysql-test/r/row-checksum.result
index fb8a1260a1d..9e58d6fa96e 100644
--- a/mysql-test/r/row-checksum.result
+++ b/mysql-test/r/row-checksum.result
@@ -73,7 +73,7 @@ test.t1 3885665021
drop table if exists t1;
create table t1 (a int null, v varchar(100)) engine=innodb checksum=0 row_format=fixed;
Warnings:
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
insert into t1 values(null, null), (1, "hello");
checksum table t1;
Table Checksum
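Related to that message cleanup, engine-level conditions previously reported under code 1478 now use handler error code 140, and the generic "Got error N from storage engine" line names the error ("Wrong create options"), as the partition_innodb_plugin.result and row-checksum*.result hunks show. The condition being warned about is unchanged, per this sketch from the diffs above:

# With innodb_file_per_table = OFF, KEY_BLOCK_SIZE is ignored and warned about:
ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
SHOW WARNINGS;
# Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.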
diff --git a/mysql-test/r/server_id.require b/mysql-test/r/server_id.require
deleted file mode 100644
index adffcc483b1..00000000000
--- a/mysql-test/r/server_id.require
+++ /dev/null
@@ -1,2 +0,0 @@
-Variable_name Value
-server_id 1
diff --git a/mysql-test/r/server_id1.require b/mysql-test/r/server_id1.require
deleted file mode 100644
index 666c94ef633..00000000000
--- a/mysql-test/r/server_id1.require
+++ /dev/null
@@ -1,2 +0,0 @@
-Variable_name Value
-server_id 102
diff --git a/mysql-test/r/show_explain.result b/mysql-test/r/show_explain.result
new file mode 100644
index 00000000000..da132a102e2
--- /dev/null
+++ b/mysql-test/r/show_explain.result
@@ -0,0 +1,1100 @@
+drop table if exists t0, t1, t2, t3, t4;
+drop view if exists v1;
+SET @old_debug= @@session.debug;
+set debug_sync='RESET';
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (a int);
+insert into t1 select A.a + 10*B.a + 100*C.a from t0 A, t0 B, t0 C;
+alter table t1 add b int, add c int, add filler char(32);
+update t1 set b=a, c=a, filler='fooo';
+alter table t1 add key(a), add key(b);
+show explain for 2000000000;
+ERROR HY000: Unknown thread id: 2000000000
+show explain for (select max(a) from t0);
+ERROR HY000: You may only use constant expressions in this statement
+SET @old_debug= @@session.debug;
+show explain for $thr2;
+ERROR HY000: Target is not running an EXPLAINable command
+show explain for $thr1;
+ERROR HY000: Target is not running an EXPLAINable command
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+select count(*) from t1 where a < 100000;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index a a 5 NULL 1000 Using where; Using index
+Warnings:
+Note 1003 select count(*) from t1 where a < 100000
+count(*)
+1000
+select max(c) from t1 where a < 10;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a a 5 NULL 10 Using index condition
+Warnings:
+Note 1003 select max(c) from t1 where a < 10
+max(c)
+9
+# We can catch EXPLAIN, too.
+set @show_expl_tmp= @@optimizer_switch;
+set optimizer_switch='index_condition_pushdown=on,mrr=on,mrr_sort_keys=on';
+explain select max(c) from t1 where a < 10;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a a 5 NULL 10 Using index condition; Rowid-ordered scan
+Warnings:
+Note 1003 explain select max(c) from t1 where a < 10
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a a 5 NULL 10 Using index condition; Rowid-ordered scan
+set optimizer_switch= @show_expl_tmp;
+set debug_dbug=@old_debug;
+# UNION, first branch
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+explain select a from t0 A union select a+1 from t0 B;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY A ALL NULL NULL NULL NULL 10
+2 UNION B ALL NULL NULL NULL NULL 10
+NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 explain select a from t0 A union select a+1 from t0 B
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY A ALL NULL NULL NULL NULL 10
+2 UNION B ALL NULL NULL NULL NULL 10
+NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL
+set debug_dbug=@old_debug;
+# UNION, second branch
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+explain select a from t0 A union select a+1 from t0 B;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY A ALL NULL NULL NULL NULL 10
+2 UNION B ALL NULL NULL NULL NULL 10
+NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 explain select a from t0 A union select a+1 from t0 B
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY A ALL NULL NULL NULL NULL 10
+2 UNION B ALL NULL NULL NULL NULL 10
+NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL
+set debug_dbug=@old_debug;
+# Uncorrelated subquery, select
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+select a, (select max(a) from t0 B) from t0 A where a<1;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY A ALL NULL NULL NULL NULL 10 Using where
+2 SUBQUERY B ALL NULL NULL NULL NULL 10
+Warnings:
+Note 1003 select a, (select max(a) from t0 B) from t0 A where a<1
+a (select max(a) from t0 B)
+0 9
+set debug_dbug=@old_debug;
+# Uncorrelated subquery, explain
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+explain select a, (select max(a) from t0 B) from t0 A where a<1;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY A ALL NULL NULL NULL NULL 10 Using where
+2 SUBQUERY B ALL NULL NULL NULL NULL 10
+Warnings:
+Note 1003 explain select a, (select max(a) from t0 B) from t0 A where a<1
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY A ALL NULL NULL NULL NULL 10 Using where
+2 SUBQUERY B ALL NULL NULL NULL NULL 10
+set debug_dbug=@old_debug;
+# correlated subquery, select
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+select a, (select max(a) from t0 b where b.a+a.a<10) from t0 a where a<1;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY a ALL NULL NULL NULL NULL 10 Using where
+2 DEPENDENT SUBQUERY b ALL NULL NULL NULL NULL 10 Using where
+Warnings:
+Note 1003 select a, (select max(a) from t0 b where b.a+a.a<10) from t0 a where a<1
+a (select max(a) from t0 b where b.a+a.a<10)
+0 9
+set debug_dbug=@old_debug;
+# correlated subquery, explain
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+select a, (select max(a) from t0 b where b.a+a.a<10) from t0 a where a<1;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY a ALL NULL NULL NULL NULL 10 Using where
+2 DEPENDENT SUBQUERY b ALL NULL NULL NULL NULL 10 Using where
+Warnings:
+Note 1003 select a, (select max(a) from t0 b where b.a+a.a<10) from t0 a where a<1
+a (select max(a) from t0 b where b.a+a.a<10)
+0 9
+set debug_dbug=@old_debug;
+# correlated subquery, select, while inside the subquery
+set @show_explain_probe_select_id=2;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+select a, (select max(a) from t0 b where b.a+a.a<10) from t0 a where a<1;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY a ALL NULL NULL NULL NULL 10 Using where
+2 DEPENDENT SUBQUERY b ALL NULL NULL NULL NULL 10 Using where
+Warnings:
+Note 1003 select a, (select max(a) from t0 b where b.a+a.a<10) from t0 a where a<1
+a (select max(a) from t0 b where b.a+a.a<10)
+0 9
+set debug_dbug=@old_debug;
+# correlated subquery, explain, while inside the subquery
+set @show_explain_probe_select_id=2;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+select a, (select max(a) from t0 b where b.a+a.a<10) from t0 a where a<1;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY a ALL NULL NULL NULL NULL 10 Using where
+2 DEPENDENT SUBQUERY b ALL NULL NULL NULL NULL 10 Using where
+Warnings:
+Note 1003 select a, (select max(a) from t0 b where b.a+a.a<10) from t0 a where a<1
+a (select max(a) from t0 b where b.a+a.a<10)
+0 9
+set debug_dbug=@old_debug;
+# correlated subquery, explain, while inside the subquery
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_end';
+select a, (select max(a) from t0 b where b.a+a.a<10) from t0 a where a<1;
+show explain for $thr2;
+ERROR HY000: Target is not running an EXPLAINable command
+a (select max(a) from t0 b where b.a+a.a<10)
+0 9
+set debug_dbug=@old_debug;
+# Try to do SHOW EXPLAIN for a query that runs a SET command:
+# I've found experimentally that select_id==2 here...
+#
+set @show_explain_probe_select_id=2;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+set @foo= (select max(a) from t0 where sin(a) >0);
+show explain for $thr2;
+ERROR HY000: Target is not running an EXPLAINable command
+set debug_dbug=@old_debug;
+#
+# Attempt SHOW EXPLAIN for an UPDATE
+#
+create table t2 as select a as a, a as dummy from t0 limit 2;
+set @show_explain_probe_select_id=2;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+update t2 set dummy=0 where (select max(a) from t0 where t2.a + t0.a <3) >3 ;
+show explain for $thr2;
+ERROR HY000: Target is not running an EXPLAINable command
+show explain for $thr2;
+ERROR HY000: Target is not running an EXPLAINable command
+drop table t2;
+set debug_dbug=@old_debug;
+#
+# Attempt SHOW EXPLAIN for a DELETE
+#
+create table t2 as select a as a, a as dummy from t0 limit 2;
+set @show_explain_probe_select_id=2;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+delete from t2 where (select max(a) from t0 where t2.a + t0.a <3) >3 ;
+show explain for $thr2;
+ERROR HY000: Target is not running an EXPLAINable command
+show explain for $thr2;
+ERROR HY000: Target is not running an EXPLAINable command
+drop table t2;
+set debug_dbug=@old_debug;
+#
+# Multiple SHOW EXPLAIN calls for one select
+#
+create table t2 as select a as a, a as dummy from t0 limit 3;
+set @show_explain_probe_select_id=2;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+select t2.a, ((select max(a) from t0 where t2.a + t0.a <3) >3) as SUBQ from t2;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3
+2 DEPENDENT SUBQUERY t0 ALL NULL NULL NULL NULL 10 Using where
+Warnings:
+Note 1003 select t2.a, ((select max(a) from t0 where t2.a + t0.a <3) >3) as SUBQ from t2
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3
+2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL Query plan already deleted
+Warnings:
+Note 1003 select t2.a, ((select max(a) from t0 where t2.a + t0.a <3) >3) as SUBQ from t2
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 3
+2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL Query plan already deleted
+Warnings:
+Note 1003 select t2.a, ((select max(a) from t0 where t2.a + t0.a <3) >3) as SUBQ from t2
+a SUBQ
+0 0
+1 0
+2 0
+drop table t2;
+set debug_dbug=@old_debug;
+#
+# SHOW EXPLAIN for SELECT ... ORDER BY with "Using filesort"
+#
+explain select * from t0 order by a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t0 ALL NULL NULL NULL NULL 10 Using filesort
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+set @show_explain_probe_select_id=1;
+select * from t0 order by a;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t0 ALL NULL NULL NULL NULL 10 Using filesort
+Warnings:
+Note 1003 select * from t0 order by a
+a
+0
+1
+2
+3
+4
+5
+6
+7
+8
+9
+set debug_dbug=@old_debug;
+#
+# SHOW EXPLAIN for SELECT ... with "Using temporary"
+#
+explain select distinct a from t0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t0 ALL NULL NULL NULL NULL 10 Using temporary
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+set @show_explain_probe_select_id=1;
+select distinct a from t0;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t0 ALL NULL NULL NULL NULL 10 Using temporary
+Warnings:
+Note 1003 select distinct a from t0
+a
+0
+1
+2
+3
+4
+5
+6
+7
+8
+9
+set debug_dbug=@old_debug;
+#
+# SHOW EXPLAIN for SELECT ... with "Using temporary; Using filesort"
+#
+explain select distinct a from t0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t0 ALL NULL NULL NULL NULL 10 Using temporary
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+set @show_explain_probe_select_id=1;
+select distinct a from t0;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t0 ALL NULL NULL NULL NULL 10 Using temporary
+Warnings:
+Note 1003 select distinct a from t0
+a
+0
+1
+2
+3
+4
+5
+6
+7
+8
+9
+set debug_dbug=@old_debug;
+#
+# MDEV-238: SHOW EXPLAIN: Server crashes in JOIN::print_explain with FROM subquery and GROUP BY
+#
+CREATE TABLE t2 ( a INT );
+INSERT INTO t2 VALUES (1),(2),(1),(4),(2);
+explain SELECT alias.a FROM t2, ( SELECT * FROM t2 ) AS alias GROUP BY alias.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 5 Using temporary; Using filesort
+1 SIMPLE t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
+set debug_dbug='+d,show_explain_in_find_all_keys';
+SELECT alias.a FROM t2, ( SELECT * FROM t2 ) AS alias GROUP BY alias.a;
+# FIXED by "conservative assumptions about when QEP is available" fix:
+# NOTE: current code will not show "Using join buffer":
+show explain for $thr2;
+ERROR HY000: Target is not running an EXPLAINable command
+a
+1
+2
+4
+set debug_dbug=@old_debug;
+DROP TABLE t2;
+#
+# MDEV-239: Assertion `field_types == 0 ... ' failed in Protocol_text::store(double, uint32, String*) with
+# SHOW EXPLAIN over EXPLAIN EXTENDED
+#
+CREATE TABLE t2 (a INT);
+INSERT INTO t2 VALUES (1),(2),(1),(4),(2);
+EXPLAIN EXTENDED SELECT alias.a FROM t2, ( SELECT * FROM t2 ) AS alias GROUP BY alias.a ;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 5 100.00 Using temporary; Using filesort
+1 SIMPLE t2 ALL NULL NULL NULL NULL 5 100.00 Using join buffer (flat, BNL join)
+Warnings:
+Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` join `test`.`t2` group by `test`.`t2`.`a`
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_end';
+EXPLAIN EXTENDED SELECT alias.a FROM t2, ( SELECT * FROM t2 ) AS alias GROUP BY alias.a ;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 5 Using temporary; Using filesort
+1 SIMPLE t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
+Warnings:
+Note 1003 EXPLAIN EXTENDED SELECT alias.a FROM t2, ( SELECT * FROM t2 ) AS alias GROUP BY alias.a
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 5 100.00 Using temporary; Using filesort
+1 SIMPLE t2 ALL NULL NULL NULL NULL 5 100.00 Using join buffer (flat, BNL join)
+Warnings:
+Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t2` join `test`.`t2` group by `test`.`t2`.`a`
+set debug_dbug=@old_debug;
+DROP TABLE t2;
+#
+# MDEV-240: SHOW EXPLAIN: Assertion `this->optimized == 2' failed in
+# JOIN::print_explain on query with a JOIN, TEMPTABLE view,
+#
+CREATE TABLE t3 (a INT);
+CREATE ALGORITHM=TEMPTABLE VIEW v1 AS SELECT * FROM t3;
+INSERT INTO t3 VALUES (8);
+CREATE TABLE t2 (b INT);
+INSERT INTO t2 VALUES (4),(5),(6),(7),(8),(9);
+explain SELECT * FROM v1, t2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY <derived2> system NULL NULL NULL NULL 1
+1 PRIMARY t2 ALL NULL NULL NULL NULL 6
+2 DERIVED t3 system NULL NULL NULL NULL 1
+set @show_explain_probe_select_id=2;
+set debug_dbug='+d,show_explain_probe_join_exec_end';
+SELECT * FROM v1, t2;
+show explain for $thr2;
+ERROR HY000: Target is not running an EXPLAINable command
+a b
+8 4
+8 5
+8 6
+8 7
+8 8
+8 9
+set debug_dbug=@old_debug;
+DROP VIEW v1;
+DROP TABLE t2, t3;
+#
+# MDEV-267: SHOW EXPLAIN: Server crashes in JOIN::print_explain on most of queries
+#
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_end';
+select sleep(1);
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used
+Warnings:
+Note 1003 select sleep(1)
+sleep(1)
+0
+set debug_dbug=@old_debug;
+#
+# Same as above, but try another reason for JOIN to be degenerate
+#
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_end';
+select * from t0 where 1>10;
+show explain for $thr2;
+ERROR HY000: Target is not running an EXPLAINable command
+a
+set debug_dbug=@old_debug;
+#
+# Same as above, but try another reason for JOIN to be degenerate (2)
+#
+create table t3(a int primary key);
+insert into t3 select a from t0;
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_end';
+select * from t0,t3 where t3.a=112233;
+show explain for $thr2;
+ERROR HY000: Target is not running an EXPLAINable command
+a a
+set debug_dbug=@old_debug;
+drop table t3;
+#
+# MDEV-270: SHOW EXPLAIN: server crashes in JOIN::print_explain on a query with
+# select tables optimized away
+#
+CREATE TABLE t2 (pk INT PRIMARY KEY, a INT ) ENGINE=MyISAM;
+INSERT INTO t2 VALUES
+(1,4),(2,62),(3,7),(4,1),(5,0),(6,7),(7,7),(8,1),(9,7),(10,1),
+(11,5),(12,2),(13,0),(14,1),(15,8),(16,1),(17,1),(18,9),(19,1),(20,5) ;
+explain SELECT * FROM t2 WHERE a =
+(SELECT MAX(a) FROM t2
+WHERE pk= (SELECT MAX(pk) FROM t2 WHERE pk = 3)
+);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 20 Using where
+2 SUBQUERY t2 const PRIMARY PRIMARY 4 const 1 Using where
+3 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+set @show_explain_probe_select_id=2;
+set debug_dbug='+d,show_explain_probe_do_select';
+SELECT * FROM t2 WHERE a =
+(SELECT MAX(a) FROM t2
+WHERE pk= (SELECT MAX(pk) FROM t2 WHERE pk = 3)
+);
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 20 Using where
+2 SUBQUERY t2 const PRIMARY PRIMARY 4 const 1 Using where
+3 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
+Warnings:
+Note 1003 SELECT * FROM t2 WHERE a =
+(SELECT MAX(a) FROM t2
+WHERE pk= (SELECT MAX(pk) FROM t2 WHERE pk = 3)
+)
+pk a
+3 7
+6 7
+7 7
+9 7
+set debug_dbug=@old_debug;
+drop table t2;
+#
+# MDEV-273: SHOW EXPLAIN: server crashes in JOIN::print_explain on a query with impossible WHERE
+#
+CREATE TABLE t2 (a1 INT, KEY(a1)) ENGINE=MyISAM;
+INSERT INTO t2 VALUES
+(4),(6),(7),(1),(0),(7),(7),(1),(7),(1),
+(5),(2),(0),(1),(8),(1),(1),(9),(1),(5);
+CREATE TABLE t3 (b1 INT) ENGINE=MyISAM;
+INSERT INTO t3 VALUES
+(4),(5),(8),(4),(8),(2),(9),(6),(4),(8),
+(3),(5),(9),(6),(8),(3),(2),(6),(3),(1),
+(4),(3),(1),(7),(0),(0),(9),(5),(9),(0),
+(2),(2),(5),(9),(1),(4),(8),(6),(5),(5),
+(1),(7),(2),(8),(9),(3),(2),(6),(6),(5),
+(4),(3),(2),(7),(4),(6),(0),(8),(5),(8),
+(2),(9),(7),(5),(7),(0),(4),(3),(1),(0),
+(6),(2),(8),(3),(7),(3),(5),(5),(1),(2),
+(1),(7),(1),(9),(9),(8),(3);
+CREATE TABLE t4 (c1 INT) ENGINE=MyISAM;
+EXPLAIN
+SELECT count(*) FROM t2, t3
+WHERE a1 < ALL (
+SELECT a1 FROM t2
+WHERE a1 IN ( SELECT a1 FROM t2, t4 )
+);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 index NULL a1 5 NULL 20 Using where; Using index
+1 PRIMARY t3 ALL NULL NULL NULL NULL 87 Using join buffer (flat, BNL join)
+2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_do_select';
+SELECT count(*) FROM t2, t3
+WHERE a1 < ALL (
+SELECT a1 FROM t2
+WHERE a1 IN ( SELECT a1 FROM t2, t4 )
+);
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 index NULL a1 5 NULL 20 Using where; Using index
+1 PRIMARY t3 ALL NULL NULL NULL NULL 87 Using join buffer (flat, BNL join)
+2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL no matching row in const table
+Warnings:
+Note 1003 SELECT count(*) FROM t2, t3
+WHERE a1 < ALL (
+SELECT a1 FROM t2
+WHERE a1 IN ( SELECT a1 FROM t2, t4 )
+)
+count(*)
+1740
+set debug_dbug=@old_debug;
+drop table t2, t3, t4;
+#
+# MDEV-275: SHOW EXPLAIN: server crashes in JOIN::print_explain with IN subquery and aggregate function
+#
+CREATE TABLE t2 ( `pk` INT NOT NULL PRIMARY KEY, `a1` INT NOT NULL, KEY(`a1`)) ENGINE=MyISAM;
+INSERT INTO t2 VALUES
+(1,5),(2,4),(3,6),(4,9),(5,2),(6,8),(7,4),(8,8),(9,0),(10,43),
+(11,23),(12,3),(13,45),(14,16),(15,2),(16,33),(17,2),(18,5),(19,9),(20,2);
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_end';
+SELECT * FROM t2 WHERE (5, 78) IN (SELECT `a1`, MAX(`a1`) FROM t2 GROUP BY `a1`);
+show explain for $thr2;
+ERROR HY000: Target is not running an EXPLAINable command
+pk a1
+set debug_dbug=@old_debug;
+DROP TABLE t2;
+DROP TABLE t1;
+#
+# MDEV-305: SHOW EXPLAIN: ref returned by SHOW EXPLAIN is different from the normal EXPLAIN ('const' vs empty string)
+#
+CREATE TABLE t1(a INT, KEY(a));
+INSERT INTO t1 VALUES (3),(1),(5),(1);
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+SELECT 'test' FROM t1 WHERE a=1;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref a a 5 const 1 Using index
+Warnings:
+Note 1003 SELECT 'test' FROM t1 WHERE a=1
+test
+test
+test
+set debug_dbug=@old_debug;
+DROP TABLE t1;
+#
+# MDEV-299: SHOW EXPLAIN: Plan produced by SHOW EXPLAIN changes back and forth during query execution
+#
+create table t1 (key1 int, col1 int, col2 int, filler char(100), key(key1));
+insert into t1 select A.a+ 10 * B.a, 10, 10, 'filler-data' from t0 A, t0 B;
+update t1 set col1=3, col2=10 where key1=1;
+update t1 set col1=3, col2=1000 where key1=2;
+update t1 set col1=3, col2=10 where key1=3;
+update t1 set col1=3, col2=1000 where key1=4;
+set @tmp_mdev299_jcl= @@join_cache_level;
+set join_cache_level=0;
+explain select count(*) from t1 A, t1 B where B.key1 < A.col2 and A.col1=3 AND B.col2 + 1 < 100;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE A ALL NULL NULL NULL NULL 100 Using where
+1 SIMPLE B ALL key1 NULL NULL NULL 100 Range checked for each record (index map: 0x1)
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_test_if_quick_select';
+select count(*) from t1 A, t1 B where B.key1 < A.col2 and A.col1=3 AND B.col2 + 1 < 100;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE A ALL NULL NULL NULL NULL 100 Using where
+1 SIMPLE B ALL key1 NULL NULL NULL 100 Range checked for each record (index map: 0x1)
+Warnings:
+Note 1003 select count(*) from t1 A, t1 B where B.key1 < A.col2 and A.col1=3 AND B.col2 + 1 < 100
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE A ALL NULL NULL NULL NULL 100 Using where
+1 SIMPLE B ALL key1 NULL NULL NULL 100 Range checked for each record (index map: 0x1)
+Warnings:
+Note 1003 select count(*) from t1 A, t1 B where B.key1 < A.col2 and A.col1=3 AND B.col2 + 1 < 100
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE A ALL NULL NULL NULL NULL 100 Using where
+1 SIMPLE B ALL key1 NULL NULL NULL 100 Range checked for each record (index map: 0x1)
+Warnings:
+Note 1003 select count(*) from t1 A, t1 B where B.key1 < A.col2 and A.col1=3 AND B.col2 + 1 < 100
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE A ALL NULL NULL NULL NULL 100 Using where
+1 SIMPLE B ALL key1 NULL NULL NULL 100 Range checked for each record (index map: 0x1)
+Warnings:
+Note 1003 select count(*) from t1 A, t1 B where B.key1 < A.col2 and A.col1=3 AND B.col2 + 1 < 100
+count(*)
+212
+set debug_dbug=@old_debug;
+drop table t1;
+#
+# MDEV-297: SHOW EXPLAIN: Server gets stuck until timeout occurs while
+# executing SHOW INDEX and SHOW EXPLAIN in parallel
+#
+CREATE TABLE t1(a INT, b INT, c INT, KEY(a), KEY(b), KEY(c));
+INSERT INTO t1 (a) VALUES (3),(1),(5),(1);
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+SHOW INDEX FROM t1;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE STATISTICS ALL NULL NULL NULL NULL NULL Skip_open_table; Scanned all databases
+Warnings:
+Note 1003 SHOW INDEX FROM t1
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 1 a 1 a A NULL NULL NULL YES BTREE
+t1 1 b 1 b A NULL NULL NULL YES BTREE
+t1 1 c 1 c A NULL NULL NULL YES BTREE
+set debug_dbug=@old_debug;
+DROP TABLE t1;
+#
+# MDEV-324: SHOW EXPLAIN: Plan produced by SHOW EXPLAIN for a query with TEMPTABLE view
+# loses 'DERIVED' line on the way without saying that the plan was already deleted
+#
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+CREATE ALGORITHM=TEMPTABLE VIEW v1 AS SELECT * FROM t1;
+EXPLAIN SELECT a + 1 FROM v1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL 2
+2 DERIVED t1 ALL NULL NULL NULL NULL 2
+set debug_dbug='+d,show_explain_probe_join_tab_preread';
+set @show_explain_probe_select_id=1;
+SELECT a + 1 FROM v1;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL 2
+2 DERIVED NULL NULL NULL NULL NULL NULL NULL Query plan already deleted
+Warnings:
+Note 1003 SELECT a + 1 FROM v1
+a + 1
+2
+3
+set debug_dbug=@old_debug;
+DROP VIEW v1;
+DROP TABLE t1;
+#
+# MDEV-323: SHOW EXPLAIN: Plan produced by SHOW EXPLAIN loses
+# 'UNION RESULT' line on the way without saying that the plan was already deleted
+#
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (4),(6);
+EXPLAIN
+SELECT a FROM t1 WHERE a IN ( SELECT 1+SLEEP(0.01) UNION SELECT 2 );
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
+2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL No tables used
+3 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL No tables used
+NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
+set debug_dbug='+d,show_explain_probe_union_read';
+SELECT a FROM t1 WHERE a IN ( SELECT 1+SLEEP(0.01) UNION SELECT 2 );
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
+2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL No tables used
+3 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL No tables used
+NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 SELECT a FROM t1 WHERE a IN ( SELECT 1+SLEEP(0.01) UNION SELECT 2 )
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
+2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL No tables used
+3 DEPENDENT UNION NULL NULL NULL NULL NULL NULL NULL No tables used
+NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 SELECT a FROM t1 WHERE a IN ( SELECT 1+SLEEP(0.01) UNION SELECT 2 )
+a
+set debug_dbug=@old_debug;
+DROP TABLE t1;
+#
+# MDEV-327: SHOW EXPLAIN: Different select_type in plans produced by SHOW EXPLAIN
+# and standard EXPLAIN: 'SUBQUERY' vs 'DEPENDENT SUBQUERY'
+#
+CREATE TABLE t1 (a INT) ENGINE=Aria;
+INSERT INTO t1 VALUES
+(4),(6),(3),(5),(3),(246),(2),(9),(3),(8),
+(1),(8),(8),(5),(7),(5),(1),(6),(2),(9);
+CREATE TABLE t2 (b INT) ENGINE=Aria;
+INSERT INTO t2 VALUES
+(1),(7),(4),(7),(0),(2),(9),(4),(0),(9),
+(1),(3),(8),(8),(18),(84),(6),(3),(6),(6);
+EXPLAIN
+SELECT * FROM t1, ( SELECT * FROM t2 ) AS alias
+WHERE a < ALL ( SELECT b FROM t1, t2 WHERE a = b );
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 20 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 20
+3 SUBQUERY t1 ALL NULL NULL NULL NULL 20
+3 SUBQUERY t2 ALL NULL NULL NULL NULL 20 Using where
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+SELECT * FROM t1, ( SELECT * FROM t2 ) AS alias
+WHERE a < ALL ( SELECT b FROM t1, t2 WHERE a = b );
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 20 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 20
+3 SUBQUERY t1 ALL NULL NULL NULL NULL 20
+3 SUBQUERY t2 ALL NULL NULL NULL NULL 20 Using where
+Warnings:
+Note 1003 SELECT * FROM t1, ( SELECT * FROM t2 ) AS alias
+WHERE a < ALL ( SELECT b FROM t1, t2 WHERE a = b )
+a b
+set debug_dbug=@old_debug;
+DROP TABLE t1, t2;
+#
+# Test that SHOW EXPLAIN will print 'Distinct'.
+#
+CREATE TABLE t1 (a int(10) unsigned not null primary key,b int(10) unsigned);
+INSERT INTO t1 VALUES (1,1),(2,1),(3,1),(4,1);
+CREATE TABLE t3 (a int(10) unsigned, key(A), b text);
+INSERT INTO t3 VALUES (1,'1'),(2,'2');
+create temporary table t4 select * from t3;
+insert into t3 select * from t4;
+insert into t4 select * from t3;
+insert into t3 select * from t4;
+insert into t4 select * from t3;
+insert into t3 select * from t4;
+insert into t4 select * from t3;
+insert into t3 select * from t4;
+explain select distinct t1.a from t1,t3 where t1.a=t3.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index PRIMARY PRIMARY 4 NULL 4 Using index; Using temporary
+1 SIMPLE t3 ref a a 5 test.t1.a 7 Using index; Distinct
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+select distinct t1.a from t1,t3 where t1.a=t3.a;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index PRIMARY PRIMARY 4 NULL 4 Using index; Using temporary
+1 SIMPLE t3 ref a a 5 test.t1.a 7 Using index; Distinct
+Warnings:
+Note 1003 select distinct t1.a from t1,t3 where t1.a=t3.a
+a
+1
+2
+set debug_dbug=@old_debug;
+drop table t1,t3,t4;
+#
+# ---------- SHOW EXPLAIN and permissions -----------------
+#
+grant ALL on test.* to test2@localhost;
+grant super on *.* to test2@localhost;
+#
+# First, make sure that user 'test2' cannot do SHOW EXPLAIN on us
+#
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+select * from t0 where a < 3;
+show explain for $thr2;
+ERROR 42000: Access denied; you need (at least one of) the PROCESS privilege(s) for this operation
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t0 ALL NULL NULL NULL NULL 10 Using where
+Warnings:
+Note 1003 select * from t0 where a < 3
+a
+0
+1
+2
+set debug_dbug=@old_debug;
+#
+# Check that user test2 can do SHOW EXPLAIN on its own queries
+#
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+select * from t0 where a < 3;
+show explain for $thr_con2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t0 ALL NULL NULL NULL NULL 10 Using where
+Warnings:
+Note 1003 select * from t0 where a < 3
+a
+0
+1
+2
+#
+# Now, grant test2 the PROCESS privilege, and see that he's able to observe us
+#
+grant process on *.* to test2@localhost;
+set debug_dbug=@old_debug;
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+select * from t0 where a < 3;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t0 ALL NULL NULL NULL NULL 10 Using where
+Warnings:
+Note 1003 select * from t0 where a < 3
+a
+0
+1
+2
+set debug_dbug=@old_debug;
+revoke all privileges on test.* from test2@localhost;
+drop user test2@localhost;
+#
+# Test that it is possible to KILL a SHOW EXPLAIN command that's waiting
+# on its target thread
+#
+create table t1 (pk int primary key, data char(64)) engine=innodb;
+insert into t1 select A.a + 10 * B.a + 100 * C.a, 'data1' from t0 A, t0 B, t0 C;
+# Lock two threads
+set autocommit=0;
+select * from t1 where pk between 10 and 20 for update;
+pk data
+10 data1
+11 data1
+12 data1
+13 data1
+14 data1
+15 data1
+16 data1
+17 data1
+18 data1
+19 data1
+20 data1
+set autocommit=0;
+select * from t1 where pk between 10 and 20 for update;
+# do: send_eval show explain for thr2;
+kill query $thr_default;
+ERROR 70100: Query execution was interrupted
+rollback;
+pk data
+10 data1
+11 data1
+12 data1
+13 data1
+14 data1
+15 data1
+16 data1
+17 data1
+18 data1
+19 data1
+20 data1
+drop table t1;
+#
+# Check that the I_S table is invisible
+#
+select table_name from information_schema.tables where table_schema='information_schema' and table_name like '%explain%';
+table_name
+#
+# MDEV-325: SHOW EXPLAIN: Plan produced by SHOW EXPLAIN is different from standard EXPLAIN: type ALL vs 'index_merge'..
+#
+CREATE TABLE t1 (a INT, b INT, KEY(a), KEY(b)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES
+(8,0),(128,5050),(5372,8),(234,7596),(2,0),(2907,8930),(1,0),
+(0,5224),(8,7638),(960,5),(9872,1534),(0,2295),(3408,9809),
+(7,0),(1168,0),(2089,5570),(0,205),(88,1018),(0,26528),
+(0,0),(4,5567),(1444,145),(6,0),(1,7535),(7793,534),(70,9),
+(178,1),(44,5),(189,0),(3,0);
+EXPLAIN
+SELECT a+SLEEP(0.01) FROM t1
+WHERE a IN ( 255, 0 ) OR b BETWEEN 6 AND 129
+ORDER BY b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index_merge a,b a,b 5,5 NULL 8 Using sort_union(a,b); Using where; Using filesort
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+SELECT a+SLEEP(0.01) FROM t1
+WHERE a IN ( 255, 0 ) OR b BETWEEN 6 AND 129
+ORDER BY b;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index_merge a,b a,b 5,5 NULL 8 Using sort_union(a,b); Using where; Using filesort
+Warnings:
+Note 1003 SELECT a+SLEEP(0.01) FROM t1
+WHERE a IN ( 255, 0 ) OR b BETWEEN 6 AND 129
+ORDER BY b
+a+SLEEP(0.01)
+0
+5372
+70
+0
+0
+0
+0
+set debug_dbug=@old_debug;
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_do_select';
+SELECT a+SLEEP(0.01) FROM t1
+WHERE a IN ( 255, 0 ) OR b BETWEEN 6 AND 129
+ORDER BY b;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index_merge a,b a,b 5,5 NULL 8 Using sort_union(a,b); Using where; Using filesort
+Warnings:
+Note 1003 SELECT a+SLEEP(0.01) FROM t1
+WHERE a IN ( 255, 0 ) OR b BETWEEN 6 AND 129
+ORDER BY b
+a+SLEEP(0.01)
+0
+5372
+70
+0
+0
+0
+0
+set debug_dbug=@old_debug;
+drop table t1;
+#
+# MDEV-298: SHOW EXPLAIN: Plan returned by SHOW EXPLAIN only contains
+# 'Using temporary' while the standard EXPLAIN says 'Using temporary; Using filesort'
+#
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),
+(10),(11),(12),(13),(14),(15),(16);
+INSERT INTO t1 SELECT t11.a FROM t1 t11, t1 t12, t1 t13;
+EXPLAIN SELECT a FROM t1 GROUP BY a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4112 Using temporary; Using filesort
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+SELECT a FROM t1 GROUP BY a;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4112 Using temporary; Using filesort
+Warnings:
+Note 1003 SELECT a FROM t1 GROUP BY a
+a
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+11
+12
+13
+14
+15
+16
+set debug_dbug=@old_debug;
+drop table t1;
+#
+# MDEV-408: SHOW EXPLAIN: Some values are chopped off in SHOW EXPLAIN output
+#
+CREATE TABLE t1 (a INT, b VARCHAR(35)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (3989,'Abilene'),(3873,'Akron');
+CREATE TABLE t2 (c INT, d VARCHAR(52) PRIMARY KEY, KEY(c)) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (86,'English'),(87,'Russian');
+explain SELECT SUM(a + SLEEP(0.1)) FROM t1 WHERE a IN ( SELECT c FROM t2 WHERE d < b ) OR b < 's';
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
+2 DEPENDENT SUBQUERY t2 index_subquery PRIMARY,c c 5 func 1 Using index; Using where
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+SELECT SUM(a + SLEEP(0.1)) FROM t1 WHERE a IN ( SELECT c FROM t2 WHERE d < b ) OR b < 's';
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
+2 DEPENDENT SUBQUERY t2 index_subquery PRIMARY,c c 5 func 1 Using index; Using where
+Warnings:
+Note 1003 SELECT SUM(a + SLEEP(0.1)) FROM t1 WHERE a IN ( SELECT c FROM t2 WHERE d < b ) OR b < 's'
+SUM(a + SLEEP(0.1))
+7862
+set debug_dbug=@old_debug;
+drop table t1, t2;
+#
+# MDEV-412: SHOW EXPLAIN: Server crashes in JOIN::print_explain on a query with inner join and ORDER BY the same column twice
+#
+CREATE TABLE t1 (a INT PRIMARY KEY, b VARCHAR(3), KEY(b)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES
+(3795,'USA'),(3913,'USA'),(3846,'ITA'),(4021,'USA'),(4005,'RUS'),(4038,'USA'),
+(3825,'USA'),(3840,'USA'),(3987,'USA'),(3807,'USA'),(3896,'USA'),(4052,'USA'),
+(3973,'USA'),(3982,'ITA'),(3965,'USA'),(3852,'RUS'),(4006,'USA'),(3800,'USA'),
+(4020,'USA'),(4040,'USA'),(3916,'USA'),(3817,'USA'),(3885,'USA'),(3802,'USA'),
+(4009,'ITA'),(3895,'USA'),(3963,'RUS'),(4045,'USA'),(3988,'USA'),(3815,'USA'),
+(4063,'USA'),(3978,'USA'),(4019,'USA'),(3954,'USA'),(3950,'USA'),(3974,'ITA'),
+(4054,'USA'),(4061,'RUS'),(3976,'USA'),(3966,'USA'),(3957,'USA'),(3981,'USA'),
+(3923,'USA'),(3876,'USA'),(3819,'USA'),(3877,'USA'),(3829,'ITA'),(3964,'USA'),
+(4053,'RUS'),(3917,'USA'),(3874,'USA'),(4023,'USA'),(4001,'USA'),(3872,'USA'),
+(3890,'USA'),(3962,'USA'),(3886,'USA'),(4026,'ITA'),(3869,'USA'),(3937,'RUS'),
+(3975,'USA'),(3944,'USA'),(3908,'USA'),(3867,'USA'),(3947,'USA'),(3838,'USA'),
+(3796,'USA'),(3893,'USA'),(3920,'ITA'),(3994,'USA'),(3875,'RUS'),(4011,'USA'),
+(4013,'USA'),(3810,'USA'),(3834,'USA'),(3968,'USA'),(3931,'USA'),(3839,'USA'),
+(4042,'USA'),(4039,'ITA'),(3811,'USA'),(3837,'RUS'),(4041,'USA'),(3884,'USA'),
+(3894,'USA'),(3879,'USA'),(3942,'USA'),(3959,'USA'),(3814,'USA'),(4044,'USA'),
+(3971,'ITA'),(3823,'USA'),(3793,'RUS'),(3855,'USA'),(3905,'USA'),(3865,'USA'),
+(4046,'USA'),(3990,'USA'),(4022,'USA'),(3833,'USA'),(3918,'USA'),(4064,'ITA'),
+(3821,'USA'),(3836,'RUS'),(3921,'USA'),(3914,'USA'),(3888,'USA');
+CREATE TABLE t2 (c VARCHAR(3) PRIMARY KEY) ENGINE=MyISAM;
+INSERT INTO t2 VALUES ('USA');
+CREATE TABLE t3 (d VARCHAR(3), e VARCHAR(52), PRIMARY KEY (d,e)) ENGINE=MyISAM;
+INSERT INTO t3 VALUES
+('JPN','Japanese'),('KOR','Korean'),('POL','Polish'),('PRT','Portuguese'),
+('ESP','Spanish'),('FRA','French'),('VNM','Vietnamese');
+explain
+SELECT b AS field1, b AS field2 FROM t1, t2, t3 WHERE d = b ORDER BY field1, field2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 system NULL NULL NULL NULL 1 Using filesort
+1 SIMPLE t1 index b b 6 NULL 107 Using where; Using index
+1 SIMPLE t3 ref PRIMARY PRIMARY 5 test.t1.b 1 Using index
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_do_select';
+SELECT b AS field1, b AS field2 FROM t1, t2, t3 WHERE d = b ORDER BY field1, field2;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 system NULL NULL NULL NULL 1 Using filesort
+1 SIMPLE t1 index b b 6 NULL 107 Using where; Using index
+1 SIMPLE t3 ref PRIMARY PRIMARY 5 test.t1.b 1 Using index
+Warnings:
+Note 1003 SELECT b AS field1, b AS field2 FROM t1, t2, t3 WHERE d = b ORDER BY field1, field2
+field1 field2
+set debug_dbug=@old_debug;
+DROP TABLE t1,t2,t3;
+#
+# MDEV-423: SHOW EXPLAIN: 'Using where' for a subquery is shown in EXPLAIN, but not in SHOW EXPLAIN output
+#
+CREATE TABLE t1 (a INT) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (7),(0),(9),(3),(4),(2),(5),(7),(0),(9),(3),(4),(2),(5);
+CREATE TABLE t2 (b INT, c INT) ENGINE=MyISAM;
+INSERT INTO t2 VALUES
+(0,4),(8,6),(1,3),(8,5),(9,3),(24,246),(6,2),(1,9),(6,3),(2,8),
+(4,1),(8,8),(4,8),(4,5),(7,7),(4,5),(1,1),(9,6),(4,2),(8,9);
+create table t3 like t2;
+insert into t3 select * from t2;
+explain
+SELECT max(a+b+c) FROM t1 AS alias1, ( SELECT * FROM t2 ) AS alias
+WHERE EXISTS ( SELECT * FROM t3 WHERE b = c ) OR a <= 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY alias1 ALL NULL NULL NULL NULL 14
+1 PRIMARY t2 ALL NULL NULL NULL NULL 20
+3 SUBQUERY t3 ALL NULL NULL NULL NULL 20 Using where
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+SELECT max(a+b+c) FROM t1 AS alias1, ( SELECT * FROM t2 ) AS alias
+WHERE EXISTS ( SELECT * FROM t3 WHERE b = c ) OR a <= 10;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY alias1 ALL NULL NULL NULL NULL 14
+1 PRIMARY t2 ALL NULL NULL NULL NULL 20
+3 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Query plan already deleted
+Warnings:
+Note 1003 SELECT max(a+b+c) FROM t1 AS alias1, ( SELECT * FROM t2 ) AS alias
+WHERE EXISTS ( SELECT * FROM t3 WHERE b = c ) OR a <= 10
+max(a+b+c)
+279
+set debug_dbug=@old_debug;
+DROP TABLE t1,t2,t3;
+#
+# MDEV-416: Server crashes in SQL_SELECT::cleanup on EXPLAIN with SUM ( DISTINCT ) in a non-correlated subquery (5.5-show-explain tree)
+#
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (b INT);
+INSERT INTO t2 VALUES (8),(9);
+EXPLAIN SELECT * FROM t1
+WHERE ( 8, 89 ) IN ( SELECT b, SUM( DISTINCT b ) FROM t2 GROUP BY b );
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using filesort
+DROP TABLE t1,t2;
+#
+# Check if queries in non-default charsets work.
+#
+set names cp1251;
+select charset('гы');
+charset('гы')
+cp1251
+select hex('гы');
+hex('гы')
+E3FB
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+select * from t0 where length('гы') = a;
+set names utf8;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t0 ALL NULL NULL NULL NULL 10 Using where
+Warnings:
+Note 1003 select * from t0 where length('гы') = a
+set names default;
+a
+2
+set debug_dbug=@old_debug;
+set names default;
+#
+# MDEV-462: SHOW EXPLAIN: Assertion `table_list->table' fails in find_field_in_table_ref if FOR contains a non-numeric value
+#
+show explain for foo;
+ERROR HY000: You may only use constant expressions in this statement
+# End
+drop table t0;
+set debug_sync='RESET';
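Taken together, show_explain.result documents the contract of the new SHOW EXPLAIN command: it takes a constant thread id (anything else fails with "You may only use constant expressions in this statement"), reports "Target is not running an EXPLAINable command" when the target connection is idle or between plans, requires the PROCESS privilege to inspect other users' threads, and marks plans freed mid-query with "Query plan already deleted". A usage sketch from a second connection, where the thread id 123 is a placeholder for a value read from SHOW PROCESSLIST:

SHOW PROCESSLIST;      # locate the Id of the connection running the slow query
SHOW EXPLAIN FOR 123;  # print the plan that thread is currently executing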
diff --git a/mysql-test/r/show_explain_ps.result b/mysql-test/r/show_explain_ps.result
new file mode 100644
index 00000000000..625b9cfddae
--- /dev/null
+++ b/mysql-test/r/show_explain_ps.result
@@ -0,0 +1,27 @@
+drop table if exists t0, t1;
+select * from performance_schema.setup_instruments where name like '%show_explain%';
+NAME ENABLED TIMED
+wait/synch/cond/sql/show_explain YES YES
+# We've got no instances
+select * from performance_schema.cond_instances where name like '%show_explain%';
+NAME OBJECT_INSTANCE_BEGIN
+# Check out if our cond is hit.
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+set @show_explain_probe_select_id=1;
+set debug_dbug='d,show_explain_probe_join_exec_start';
+select count(*) from t0 where a < 100000;
+show explain for $thr2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t0 ALL NULL NULL NULL NULL 10 Using where
+Warnings:
+Note 1003 select count(*) from t0 where a < 100000
+count(*)
+10
+set debug_dbug='';
+select event_name
+from performance_schema.events_waits_history_long
+where event_name='wait/synch/cond/sql/show_explain';
+event_name
+wait/synch/cond/sql/show_explain
+drop table t0;
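show_explain_ps.result adds the performance-schema side: the command's wait condition is registered as wait/synch/cond/sql/show_explain, and after one SHOW EXPLAIN round-trip the instrument appears in the wait history. The check the test performs, in isolation:

SELECT event_name
FROM performance_schema.events_waits_history_long
WHERE event_name = 'wait/synch/cond/sql/show_explain';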
diff --git a/mysql-test/r/signal_code.result b/mysql-test/r/signal_code.result
index ca46f1d2079..4cd1de97729 100644
--- a/mysql-test/r/signal_code.result
+++ b/mysql-test/r/signal_code.result
@@ -20,16 +20,16 @@ return 0;
end $$
show procedure code signal_proc;
Pos Instruction
-0 stmt 130 "SIGNAL foo"
-1 stmt 130 "SIGNAL foo SET MESSAGE_TEXT = "This i..."
-2 stmt 131 "RESIGNAL foo"
-3 stmt 131 "RESIGNAL foo SET MESSAGE_TEXT = "This..."
+0 stmt 132 "SIGNAL foo"
+1 stmt 132 "SIGNAL foo SET MESSAGE_TEXT = "This i..."
+2 stmt 133 "RESIGNAL foo"
+3 stmt 133 "RESIGNAL foo SET MESSAGE_TEXT = "This..."
drop procedure signal_proc;
show function code signal_func;
Pos Instruction
-0 stmt 130 "SIGNAL foo"
-1 stmt 130 "SIGNAL foo SET MESSAGE_TEXT = "This i..."
-2 stmt 131 "RESIGNAL foo"
-3 stmt 131 "RESIGNAL foo SET MESSAGE_TEXT = "This..."
+0 stmt 132 "SIGNAL foo"
+1 stmt 132 "SIGNAL foo SET MESSAGE_TEXT = "This i..."
+2 stmt 133 "RESIGNAL foo"
+3 stmt 133 "RESIGNAL foo SET MESSAGE_TEXT = "This..."
4 freturn 3 0
drop function signal_func;
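The signal_code.result changes (and the sp-code.result hunk below) are pure renumbering: the statement-type codes printed by SHOW PROCEDURE CODE and SHOW FUNCTION CODE shift by two (130/131 become 132/133, and 88 becomes 90), which is what one would expect if two new statement commands were inserted into the server's internal command enumeration. The instruction sequences themselves are untouched.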
diff --git a/mysql-test/r/sp-code.result b/mysql-test/r/sp-code.result
index 67651294498..3061c58747a 100644
--- a/mysql-test/r/sp-code.result
+++ b/mysql-test/r/sp-code.result
@@ -155,11 +155,11 @@ Pos Instruction
0 stmt 9 "drop temporary table if exists sudoku..."
1 stmt 1 "create temporary table sudoku_work ( ..."
2 stmt 1 "create temporary table sudoku_schedul..."
-3 stmt 88 "call sudoku_init()"
+3 stmt 90 "call sudoku_init()"
4 jump_if_not 7(8) p_naive@0
5 stmt 4 "update sudoku_work set cnt = 0 where ..."
6 jump 8
-7 stmt 88 "call sudoku_count()"
+7 stmt 90 "call sudoku_count()"
8 stmt 6 "insert into sudoku_schedule (row,col)..."
9 set v_scounter@2 0
10 set v_i@3 1
diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result
index defd3955f6c..c297ceda572 100644
--- a/mysql-test/r/sp.result
+++ b/mysql-test/r/sp.result
@@ -6403,14 +6403,14 @@ select 1;
/*! select 2; */
select 3;
/*!00000 select 4; */
-/*!99999 select 5; */
+/*!999999 select 5; */
end
$$
create procedure proc_25411_b(
/* real comment */
/*! p1 int, */
/*!00000 p2 int */
-/*!99999 ,p3 int */
+/*!999999 ,p3 int */
)
begin
select p1, p2;
@@ -6418,11 +6418,11 @@ end
$$
create procedure proc_25411_c()
begin
-select 1/*!,2*//*!00000,3*//*!99999,4*/;
-select 1/*! ,2*//*!00000 ,3*//*!99999 ,4*/;
-select 1/*!,2 *//*!00000,3 *//*!99999,4 */;
-select 1/*! ,2 *//*!00000 ,3 *//*!99999 ,4 */;
-select 1 /*!,2*/ /*!00000,3*/ /*!99999,4*/ ;
+select 1/*!,2*//*!00000,3*//*!999999,4*/;
+select 1/*! ,2*//*!00000 ,3*//*!999999 ,4*/;
+select 1/*!,2 *//*!00000,3 *//*!999999,4 */;
+select 1/*! ,2 *//*!00000 ,3 *//*!999999 ,4 */;
+select 1 /*!,2*/ /*!00000,3*/ /*!999999,4*/ ;
end
$$
show create procedure proc_25411_a;
diff --git a/mysql-test/r/sql_mode.result b/mysql-test/r/sql_mode.result
index 6736ca7f541..4fdac6b9cea 100644
--- a/mysql-test/r/sql_mode.result
+++ b/mysql-test/r/sql_mode.result
@@ -71,7 +71,7 @@ t1 CREATE TABLE `t1` (
`email` varchar(60) NOT NULL DEFAULT '',
PRIMARY KEY (`a`),
UNIQUE KEY `email` (`email`)
-) TYPE=HEAP ROW_FORMAT=DYNAMIC
+) TYPE=MEMORY ROW_FORMAT=DYNAMIC
set sql_mode="postgresql,oracle,mssql,db2,maxdb";
select @@sql_mode;
@@sql_mode
diff --git a/mysql-test/r/stat_tables.result b/mysql-test/r/stat_tables.result
new file mode 100644
index 00000000000..6905725a6a2
--- /dev/null
+++ b/mysql-test/r/stat_tables.result
@@ -0,0 +1,383 @@
+select @@global.use_stat_tables;
+@@global.use_stat_tables
+COMPLEMENTARY
+select @@session.use_stat_tables;
+@@session.use_stat_tables
+COMPLEMENTARY
+set @save_use_stat_tables=@@use_stat_tables;
+set use_stat_tables='preferably';
+DROP DATABASE IF EXISTS dbt3_s001;
+CREATE DATABASE dbt3_s001;
+use dbt3_s001;
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='extended_keys=off';
+select * from mysql.table_stats;
+db_name table_name cardinality
+dbt3_s001 customer 150
+dbt3_s001 lineitem 6005
+dbt3_s001 nation 25
+dbt3_s001 orders 1500
+dbt3_s001 part 200
+dbt3_s001 partsupp 700
+dbt3_s001 region 5
+dbt3_s001 supplier 10
+select * from mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 customer PRIMARY 1 1.0000
+dbt3_s001 customer i_c_nationkey 1 6.0000
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_shipdate 1 2.6500
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_receiptdate 1 2.6477
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 nation PRIMARY 1 1.0000
+dbt3_s001 nation i_n_regionkey 1 5.0000
+dbt3_s001 orders PRIMARY 1 1.0000
+dbt3_s001 orders i_o_orderdate 1 1.3321
+dbt3_s001 orders i_o_custkey 1 15.0000
+dbt3_s001 part PRIMARY 1 1.0000
+dbt3_s001 part i_p_retailprice 1 1.0000
+dbt3_s001 partsupp PRIMARY 1 3.5000
+dbt3_s001 partsupp PRIMARY 2 1.0000
+dbt3_s001 partsupp i_ps_partkey 1 3.5000
+dbt3_s001 partsupp i_ps_suppkey 1 70.0000
+dbt3_s001 region PRIMARY 1 1.0000
+dbt3_s001 supplier PRIMARY 1 1.0000
+dbt3_s001 supplier i_s_nationkey 1 1.1111
+set optimizer_switch=@save_optimizer_switch;
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='index_condition_pushdown=off';
+EXPLAIN select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue
+from customer, orders, lineitem, supplier, nation, region
+where c_custkey = o_custkey and l_orderkey = o_orderkey
+and l_suppkey = s_suppkey and c_nationkey = s_nationkey
+and s_nationkey = n_nationkey and n_regionkey = r_regionkey
+and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
+ and o_orderdate < date '1995-01-01' + interval '1' year
+group by n_name
+order by revenue desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_custkey i_o_orderdate 4 NULL 179 Using where; Using temporary; Using filesort
+1 SIMPLE customer eq_ref PRIMARY,i_c_nationkey PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where
+1 SIMPLE nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.customer.c_nationkey 1
+1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+1 SIMPLE supplier eq_ref PRIMARY,i_s_nationkey PRIMARY 4 dbt3_s001.lineitem.l_suppkey 1 Using where
+select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue
+from customer, orders, lineitem, supplier, nation, region
+where c_custkey = o_custkey and l_orderkey = o_orderkey
+and l_suppkey = s_suppkey and c_nationkey = s_nationkey
+and s_nationkey = n_nationkey and n_regionkey = r_regionkey
+and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
+ and o_orderdate < date '1995-01-01' + interval '1' year
+group by n_name
+order by revenue desc;
+n_name revenue
+PERU 321915.8715
+ARGENTINA 69817.1451
+set optimizer_switch=@save_optimizer_switch;
+delete from mysql.index_stats;
+select * from mysql.table_stats;
+db_name table_name cardinality
+dbt3_s001 customer 150
+dbt3_s001 lineitem 6005
+dbt3_s001 nation 25
+dbt3_s001 orders 1500
+dbt3_s001 part 200
+dbt3_s001 partsupp 700
+dbt3_s001 region 5
+dbt3_s001 supplier 10
+select * from mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 customer PRIMARY 1 1.0000
+dbt3_s001 customer i_c_nationkey 1 6.0000
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_shipdate 1 2.6500
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_receiptdate 1 2.6477
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 nation PRIMARY 1 1.0000
+dbt3_s001 nation i_n_regionkey 1 5.0000
+dbt3_s001 orders PRIMARY 1 1.0000
+dbt3_s001 orders i_o_orderdate 1 1.3321
+dbt3_s001 orders i_o_custkey 1 15.0000
+dbt3_s001 part PRIMARY 1 1.0000
+dbt3_s001 part i_p_retailprice 1 1.0000
+dbt3_s001 partsupp PRIMARY 1 3.5000
+dbt3_s001 partsupp PRIMARY 2 1.0000
+dbt3_s001 partsupp i_ps_partkey 1 3.5000
+dbt3_s001 partsupp i_ps_suppkey 1 70.0000
+dbt3_s001 region PRIMARY 1 1.0000
+dbt3_s001 supplier PRIMARY 1 1.0000
+dbt3_s001 supplier i_s_nationkey 1 1.1111
+select * from mysql.table_stats where table_name='orders';
+db_name table_name cardinality
+dbt3_s001 orders 1500
+select * from mysql.index_stats where table_name='orders';
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 orders PRIMARY 1 1.0000
+dbt3_s001 orders i_o_orderdate 1 1.3321
+dbt3_s001 orders i_o_custkey 1 15.0000
+select (select cardinality from mysql.table_stats where table_name='orders') /
+(select avg_frequency from mysql.index_stats
+where index_name='i_o_orderdate' and prefix_arity=1) as n_distinct;
+n_distinct
+1126.0416
+select count(distinct o_orderdate) from orders;
+count(distinct o_orderdate)
+1126
+select (select cardinality from mysql.table_stats where table_name='orders') /
+(select avg_frequency from mysql.index_stats
+where index_name='i_o_custkey' and prefix_arity=1) as n_distinct;
+n_distinct
+100.0000
+select count(distinct o_custkey) from orders;
+count(distinct o_custkey)
+100
+show index from orders;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+orders 0 PRIMARY 1 o_orderkey A 1500 NULL NULL BTREE
+orders 1 i_o_orderdate 1 o_orderDATE A 1126 NULL NULL YES BTREE
+orders 1 i_o_custkey 1 o_custkey A 100 NULL NULL YES BTREE
+select index_name, column_name, cardinality from information_schema.statistics
+where table_name='orders';
+index_name column_name cardinality
+PRIMARY o_orderkey 1500
+i_o_orderdate o_orderDATE 1126
+i_o_custkey o_custkey 100
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='index_condition_pushdown=off';
+EXPLAIN select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue
+from customer, orders, lineitem, supplier, nation, region
+where c_custkey = o_custkey and l_orderkey = o_orderkey
+and l_suppkey = s_suppkey and c_nationkey = s_nationkey
+and s_nationkey = n_nationkey and n_regionkey = r_regionkey
+and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
+ and o_orderdate < date '1995-01-01' + interval '1' year
+group by n_name
+order by revenue desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_custkey i_o_orderdate 4 NULL 179 Using where; Using temporary; Using filesort
+1 SIMPLE customer eq_ref PRIMARY,i_c_nationkey PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where
+1 SIMPLE nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.customer.c_nationkey 1
+1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+1 SIMPLE supplier eq_ref PRIMARY,i_s_nationkey PRIMARY 4 dbt3_s001.lineitem.l_suppkey 1 Using where
+select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue
+from customer, orders, lineitem, supplier, nation, region
+where c_custkey = o_custkey and l_orderkey = o_orderkey
+and l_suppkey = s_suppkey and c_nationkey = s_nationkey
+and s_nationkey = n_nationkey and n_regionkey = r_regionkey
+and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
+ and o_orderdate < date '1995-01-01' + interval '1' year
+group by n_name
+order by revenue desc;
+n_name revenue
+PERU 321915.8715
+ARGENTINA 69817.1451
+set optimizer_switch=@save_optimizer_switch;
+EXPLAIN select o_year,
+sum(case when nation = 'UNITED STATES' then volume else 0 end) /
+sum(volume) as mkt_share
+from (select extract(year from o_orderdate) as o_year,
+l_extendedprice * (1-l_discount) as volume,
+n2.n_name as nation
+from part, supplier, lineitem, orders, customer,
+nation n1, nation n2, region
+where p_partkey = l_partkey and s_suppkey = l_suppkey
+and l_orderkey = o_orderkey and o_custkey = c_custkey
+and c_nationkey = n1.n_nationkey and n1.n_regionkey = r_regionkey
+and r_name = 'AMERICA' and s_nationkey = n2.n_nationkey
+and o_orderdate between date '1995-01-01' and date '1996-12-31'
+ and p_type = 'STANDARD BRUSHED STEEL' ) as all_nations
+group by o_year
+order by o_year;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders ALL PRIMARY,i_o_orderdate,i_o_custkey NULL NULL NULL 1500 Using where; Using temporary; Using filesort
+1 SIMPLE customer eq_ref PRIMARY,i_c_nationkey PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where
+1 SIMPLE n1 eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.customer.c_nationkey 1
+1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+1 SIMPLE part eq_ref PRIMARY PRIMARY 4 dbt3_s001.lineitem.l_partkey 1 Using where
+1 SIMPLE supplier eq_ref PRIMARY,i_s_nationkey PRIMARY 4 dbt3_s001.lineitem.l_suppkey 1 Using where
+1 SIMPLE n2 eq_ref PRIMARY PRIMARY 4 dbt3_s001.supplier.s_nationkey 1
+select o_year,
+sum(case when nation = 'UNITED STATES' then volume else 0 end) /
+sum(volume) as mkt_share
+from (select extract(year from o_orderdate) as o_year,
+l_extendedprice * (1-l_discount) as volume,
+n2.n_name as nation
+from part, supplier, lineitem, orders, customer,
+nation n1, nation n2, region
+where p_partkey = l_partkey and s_suppkey = l_suppkey
+and l_orderkey = o_orderkey and o_custkey = c_custkey
+and c_nationkey = n1.n_nationkey and n1.n_regionkey = r_regionkey
+and r_name = 'AMERICA' and s_nationkey = n2.n_nationkey
+and o_orderdate between date '1995-01-01' and date '1996-12-31'
+ and p_type = 'STANDARD BRUSHED STEEL' ) as all_nations
+group by o_year
+order by o_year;
+o_year mkt_share
+1995 0.4495521838895718
+1996 0.024585468215352495
+EXPLAIN select nation, o_year, sum(amount) as sum_profit
+from (select n_name as nation,
+extract(year from o_orderdate) as o_year,
+l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount
+from part, supplier, lineitem, partsupp, orders, nation
+where s_suppkey = l_suppkey and ps_suppkey = l_suppkey
+and ps_partkey = l_partkey and p_partkey = l_partkey
+and o_orderkey = l_orderkey and s_nationkey = n_nationkey
+and p_name like '%green%') as profit
+group by nation, o_year
+order by nation, o_year desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE supplier ALL PRIMARY,i_s_nationkey NULL NULL NULL 10 Using where; Using temporary; Using filesort
+1 SIMPLE nation eq_ref PRIMARY PRIMARY 4 dbt3_s001.supplier.s_nationkey 1
+1 SIMPLE partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey i_ps_suppkey 4 dbt3_s001.supplier.s_suppkey 70
+1 SIMPLE part eq_ref PRIMARY PRIMARY 4 dbt3_s001.partsupp.ps_partkey 1 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.supplier.s_suppkey 8
+1 SIMPLE orders eq_ref PRIMARY PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1
+select nation, o_year, sum(amount) as sum_profit
+from (select n_name as nation,
+extract(year from o_orderdate) as o_year,
+l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount
+from part, supplier, lineitem, partsupp, orders, nation
+where s_suppkey = l_suppkey and ps_suppkey = l_suppkey
+and ps_partkey = l_partkey and p_partkey = l_partkey
+and o_orderkey = l_orderkey and s_nationkey = n_nationkey
+and p_name like '%green%') as profit
+group by nation, o_year
+order by nation, o_year desc;
+nation o_year sum_profit
+ARGENTINA 1997 18247.873399999993
+ARGENTINA 1996 7731.089399999995
+ARGENTINA 1995 134490.5697
+ARGENTINA 1994 36767.101500000004
+ARGENTINA 1993 35857.08
+ARGENTINA 1992 35740
+ETHIOPIA 1998 2758.7801999999992
+ETHIOPIA 1997 19419.294599999997
+ETHIOPIA 1995 51231.87439999999
+ETHIOPIA 1994 3578.9478999999974
+ETHIOPIA 1992 1525.8234999999986
+IRAN 1998 37817.229600000006
+IRAN 1997 52643.77359999999
+IRAN 1996 70143.7761
+IRAN 1995 84094.58260000001
+IRAN 1994 18140.925599999995
+IRAN 1993 78655.1676
+IRAN 1992 87142.23960000002
+IRAQ 1998 22860.8082
+IRAQ 1997 93676.24359999999
+IRAQ 1996 45103.3242
+IRAQ 1994 36010.728599999995
+IRAQ 1993 33221.9399
+IRAQ 1992 47755.05900000001
+KENYA 1998 44194.831999999995
+KENYA 1997 57578.36259999999
+KENYA 1996 59195.90210000001
+KENYA 1995 79262.6278
+KENYA 1994 102360.66609999999
+KENYA 1993 128422.0196
+KENYA 1992 181517.2089
+MOROCCO 1998 41797.823199999984
+MOROCCO 1997 23685.801799999994
+MOROCCO 1996 62115.19579999998
+MOROCCO 1995 42442.64300000001
+MOROCCO 1994 48655.878000000004
+MOROCCO 1993 22926.744400000003
+MOROCCO 1992 32239.8088
+PERU 1998 86999.36459999997
+PERU 1997 121110.41070000001
+PERU 1996 177040.40759999995
+PERU 1995 122247.94520000002
+PERU 1994 88046.25329999998
+PERU 1993 49379.813799999996
+PERU 1992 80646.86050000001
+UNITED KINGDOM 1998 50577.25560000001
+UNITED KINGDOM 1997 114288.8605
+UNITED KINGDOM 1996 147684.46480000002
+UNITED KINGDOM 1995 225267.65759999998
+UNITED KINGDOM 1994 140595.5864
+UNITED KINGDOM 1993 322548.49210000003
+UNITED KINGDOM 1992 67747.88279999999
+UNITED STATES 1998 3957.0431999999996
+UNITED STATES 1997 94729.5704
+UNITED STATES 1996 79297.85670000002
+UNITED STATES 1995 62201.23360000001
+UNITED STATES 1994 43075.629899999985
+UNITED STATES 1993 27168.486199999996
+UNITED STATES 1992 34092.366
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='extended_keys=on';
+EXPLAIN select o_orderkey, p_partkey
+from part, lineitem, orders
+where p_retailprice > 1100 and o_orderdate='1997-01-01'
+and o_orderkey=l_orderkey and p_partkey=l_partkey;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders ref PRIMARY,i_o_orderdate i_o_orderdate 4 const 1
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+1 SIMPLE part eq_ref PRIMARY,i_p_retailprice PRIMARY 4 dbt3_s001.lineitem.l_partkey 1 Using where
+select o_orderkey, p_partkey
+from part, lineitem, orders
+where p_retailprice > 1100 and o_orderdate='1997-01-01'
+and o_orderkey=l_orderkey and p_partkey=l_partkey;
+o_orderkey p_partkey
+5895 200
+set optimizer_switch=@save_optimizer_switch;
+DROP DATABASE dbt3_s001;
+use test;
+#
+# Bug mdev-473: ANALYZE table locked for write
+#
+set use_stat_tables='complementary';
+create table t1 (i int);
+lock table t1 write;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Table is already up to date
+alter table t1 add column a varchar(8);
+drop table t1;
+#
+# Bug mdev-487: memory leak in ANALYZE with stat tables
+#
+SET use_stat_tables = 'preferably';
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+DELETE FROM t1 WHERE a=1;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+DROP TABLE t1;
+#
+# Bug mdev-518: corrupted/missing statistical tables
+#
+CREATE TABLE t1 (i int) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1),(2);
+FLUSH TABLE t1;
+SET use_stat_tables='never';
+EXPLAIN SELECT * FROM t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2
+FLUSH TABLES;
+SET use_stat_tables='preferably';
+EXPLAIN SELECT * FROM t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2
+DROP TABLE t1;
+set use_stat_tables=@save_use_stat_tables;
diff --git a/mysql-test/r/stat_tables_disabled.result b/mysql-test/r/stat_tables_disabled.result
new file mode 100644
index 00000000000..f57abc34e0c
--- /dev/null
+++ b/mysql-test/r/stat_tables_disabled.result
@@ -0,0 +1,70 @@
+SET SESSION STORAGE_ENGINE='InnoDB';
+select @@global.use_stat_tables;
+@@global.use_stat_tables
+NEVER
+select @@session.use_stat_tables;
+@@session.use_stat_tables
+NEVER
+set @save_use_stat_tables=@@use_stat_tables;
+set use_stat_tables='preferably';
+DROP DATABASE IF EXISTS dbt3_s001;
+CREATE DATABASE dbt3_s001;
+use dbt3_s001;
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='extended_keys=off';
+#
+# Bug mdev-503: optimizer ignores setting use_stat_tables='preferably'
+#
+flush tables
+customer, lineitem, nation, orders, part, partsupp, region, supplier;
+set use_stat_tables='never';
+EXPLAIN select sql_calc_found_rows straight_join
+l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue,
+o_orderdate, o_shippriority
+from orders, customer, lineitem
+where c_mktsegment = 'BUILDING' and c_custkey = o_custkey
+and l_orderkey = o_orderkey and o_orderdate < date '1995-03-15'
+ and l_shipdate > date '1995-03-15'
+group by l_orderkey, o_orderdate, o_shippriority
+order by revenue desc, o_orderdate
+limit 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders ALL PRIMARY,i_o_orderdate,i_o_custkey NULL NULL NULL # Using where; Using temporary; Using filesort
+1 SIMPLE customer eq_ref PRIMARY PRIMARY 4 dbt3_s001.orders.o_custkey # Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey # Using where
+set use_stat_tables='preferably';
+EXPLAIN select sql_calc_found_rows straight_join
+l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue,
+o_orderdate, o_shippriority
+from orders, customer, lineitem
+where c_mktsegment = 'BUILDING' and c_custkey = o_custkey
+and l_orderkey = o_orderkey and o_orderdate < date '1995-03-15'
+ and l_shipdate > date '1995-03-15'
+group by l_orderkey, o_orderdate, o_shippriority
+order by revenue desc, o_orderdate
+limit 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders ALL PRIMARY,i_o_orderdate,i_o_custkey NULL NULL NULL 1500 Using where; Using temporary; Using filesort
+1 SIMPLE customer eq_ref PRIMARY PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+flush tables customer, orders, lineitem;
+EXPLAIN select sql_calc_found_rows straight_join
+l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue,
+o_orderdate, o_shippriority
+from orders, customer, lineitem
+where c_mktsegment = 'BUILDING' and c_custkey = o_custkey
+and l_orderkey = o_orderkey and o_orderdate < date '1995-03-15'
+ and l_shipdate > date '1995-03-15'
+group by l_orderkey, o_orderdate, o_shippriority
+order by revenue desc, o_orderdate
+limit 10;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders ALL PRIMARY,i_o_orderdate,i_o_custkey NULL NULL NULL 1500 Using where; Using temporary; Using filesort
+1 SIMPLE customer eq_ref PRIMARY PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+# End of the test case for mdev-503
+set optimizer_switch=@save_optimizer_switch;
+DROP DATABASE dbt3_s001;
+use test;
+set use_stat_tables=@save_use_stat_tables;
+SET SESSION STORAGE_ENGINE=DEFAULT;
diff --git a/mysql-test/r/stat_tables_innodb.result b/mysql-test/r/stat_tables_innodb.result
new file mode 100644
index 00000000000..51e1567309b
--- /dev/null
+++ b/mysql-test/r/stat_tables_innodb.result
@@ -0,0 +1,412 @@
+SET SESSION STORAGE_ENGINE='InnoDB';
+set @save_optimizer_switch_for_stat_tables_test=@@optimizer_switch;
+set optimizer_switch='extended_keys=on';
+select @@global.use_stat_tables;
+@@global.use_stat_tables
+COMPLEMENTARY
+select @@session.use_stat_tables;
+@@session.use_stat_tables
+COMPLEMENTARY
+set @save_use_stat_tables=@@use_stat_tables;
+set use_stat_tables='preferably';
+DROP DATABASE IF EXISTS dbt3_s001;
+CREATE DATABASE dbt3_s001;
+use dbt3_s001;
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='extended_keys=off';
+select * from mysql.table_stats;
+db_name table_name cardinality
+dbt3_s001 customer 150
+dbt3_s001 lineitem 6005
+dbt3_s001 nation 25
+dbt3_s001 orders 1500
+dbt3_s001 part 200
+dbt3_s001 partsupp 700
+dbt3_s001 region 5
+dbt3_s001 supplier 10
+select * from mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 customer PRIMARY 1 1.0000
+dbt3_s001 customer i_c_nationkey 1 6.0000
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_shipdate 1 2.6500
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_receiptdate 1 2.6477
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 nation PRIMARY 1 1.0000
+dbt3_s001 nation i_n_regionkey 1 5.0000
+dbt3_s001 orders PRIMARY 1 1.0000
+dbt3_s001 orders i_o_orderdate 1 1.3321
+dbt3_s001 orders i_o_custkey 1 15.0000
+dbt3_s001 part PRIMARY 1 1.0000
+dbt3_s001 part i_p_retailprice 1 1.0000
+dbt3_s001 partsupp PRIMARY 1 3.5000
+dbt3_s001 partsupp PRIMARY 2 1.0000
+dbt3_s001 partsupp i_ps_partkey 1 3.5000
+dbt3_s001 partsupp i_ps_suppkey 1 70.0000
+dbt3_s001 region PRIMARY 1 1.0000
+dbt3_s001 supplier PRIMARY 1 1.0000
+dbt3_s001 supplier i_s_nationkey 1 1.1111
+set optimizer_switch=@save_optimizer_switch;
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='index_condition_pushdown=off';
+EXPLAIN select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue
+from customer, orders, lineitem, supplier, nation, region
+where c_custkey = o_custkey and l_orderkey = o_orderkey
+and l_suppkey = s_suppkey and c_nationkey = s_nationkey
+and s_nationkey = n_nationkey and n_regionkey = r_regionkey
+and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
+ and o_orderdate < date '1995-01-01' + interval '1' year
+group by n_name
+order by revenue desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_custkey i_o_orderdate 4 NULL 211 Using where; Using temporary; Using filesort
+1 SIMPLE customer eq_ref PRIMARY,i_c_nationkey PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where
+1 SIMPLE nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.customer.c_nationkey 1
+1 SIMPLE supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.customer.c_nationkey 1 Using index
+1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue
+from customer, orders, lineitem, supplier, nation, region
+where c_custkey = o_custkey and l_orderkey = o_orderkey
+and l_suppkey = s_suppkey and c_nationkey = s_nationkey
+and s_nationkey = n_nationkey and n_regionkey = r_regionkey
+and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
+ and o_orderdate < date '1995-01-01' + interval '1' year
+group by n_name
+order by revenue desc;
+n_name revenue
+PERU 321915.8715
+ARGENTINA 69817.1451
+set optimizer_switch=@save_optimizer_switch;
+delete from mysql.index_stats;
+select * from mysql.table_stats;
+db_name table_name cardinality
+dbt3_s001 customer 150
+dbt3_s001 lineitem 6005
+dbt3_s001 nation 25
+dbt3_s001 orders 1500
+dbt3_s001 part 200
+dbt3_s001 partsupp 700
+dbt3_s001 region 5
+dbt3_s001 supplier 10
+select * from mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 customer PRIMARY 1 1.0000
+dbt3_s001 customer i_c_nationkey 1 6.0000
+dbt3_s001 customer i_c_nationkey 2 1.0000
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_shipdate 1 2.6500
+dbt3_s001 lineitem i_l_shipdate 2 1.0149
+dbt3_s001 lineitem i_l_shipdate 3 1.0000
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+dbt3_s001 lineitem i_l_suppkey_partkey 3 1.0030
+dbt3_s001 lineitem i_l_suppkey_partkey 4 1.0000
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_partkey 2 1.0089
+dbt3_s001 lineitem i_l_partkey 3 1.0000
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_suppkey 2 1.2073
+dbt3_s001 lineitem i_l_suppkey 3 1.0000
+dbt3_s001 lineitem i_l_receiptdate 1 2.6477
+dbt3_s001 lineitem i_l_receiptdate 2 1.0152
+dbt3_s001 lineitem i_l_receiptdate 3 1.0000
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey 2 1.0000
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_orderkey_quantity 3 1.0000
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 lineitem i_l_commitdate 2 1.0364
+dbt3_s001 lineitem i_l_commitdate 3 1.0000
+dbt3_s001 nation PRIMARY 1 1.0000
+dbt3_s001 nation i_n_regionkey 1 5.0000
+dbt3_s001 nation i_n_regionkey 2 1.0000
+dbt3_s001 orders PRIMARY 1 1.0000
+dbt3_s001 orders i_o_orderdate 1 1.3321
+dbt3_s001 orders i_o_orderdate 2 1.0000
+dbt3_s001 orders i_o_custkey 1 15.0000
+dbt3_s001 orders i_o_custkey 2 1.0000
+dbt3_s001 part PRIMARY 1 1.0000
+dbt3_s001 part i_p_retailprice 1 1.0000
+dbt3_s001 part i_p_retailprice 2 1.0000
+dbt3_s001 partsupp PRIMARY 1 3.5000
+dbt3_s001 partsupp PRIMARY 2 1.0000
+dbt3_s001 partsupp i_ps_partkey 1 3.5000
+dbt3_s001 partsupp i_ps_partkey 2 1.0000
+dbt3_s001 partsupp i_ps_suppkey 1 70.0000
+dbt3_s001 partsupp i_ps_suppkey 2 1.0000
+dbt3_s001 region PRIMARY 1 1.0000
+dbt3_s001 supplier PRIMARY 1 1.0000
+dbt3_s001 supplier i_s_nationkey 1 1.1111
+dbt3_s001 supplier i_s_nationkey 2 1.0000
+select * from mysql.table_stats where table_name='orders';
+db_name table_name cardinality
+dbt3_s001 orders 1500
+select * from mysql.index_stats where table_name='orders';
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 orders PRIMARY 1 1.0000
+dbt3_s001 orders i_o_orderdate 1 1.3321
+dbt3_s001 orders i_o_orderdate 2 1.0000
+dbt3_s001 orders i_o_custkey 1 15.0000
+dbt3_s001 orders i_o_custkey 2 1.0000
+select (select cardinality from mysql.table_stats where table_name='orders') /
+(select avg_frequency from mysql.index_stats
+where index_name='i_o_orderdate' and prefix_arity=1) as n_distinct;
+n_distinct
+1126.0416
+select count(distinct o_orderdate) from orders;
+count(distinct o_orderdate)
+1126
+select (select cardinality from mysql.table_stats where table_name='orders') /
+(select avg_frequency from mysql.index_stats
+where index_name='i_o_custkey' and prefix_arity=1) as n_distinct;
+n_distinct
+100.0000
+select count(distinct o_custkey) from orders;
+count(distinct o_custkey)
+100
+show index from orders;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+orders 0 PRIMARY 1 o_orderkey A 1500 NULL NULL BTREE
+orders 1 i_o_orderdate 1 o_orderDATE A 1126 NULL NULL YES BTREE
+orders 1 i_o_custkey 1 o_custkey A 100 NULL NULL YES BTREE
+select index_name, column_name, cardinality from information_schema.statistics
+where table_name='orders';
+index_name column_name cardinality
+PRIMARY o_orderkey 1500
+i_o_orderdate o_orderDATE 1126
+i_o_custkey o_custkey 100
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='index_condition_pushdown=off';
+EXPLAIN select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue
+from customer, orders, lineitem, supplier, nation, region
+where c_custkey = o_custkey and l_orderkey = o_orderkey
+and l_suppkey = s_suppkey and c_nationkey = s_nationkey
+and s_nationkey = n_nationkey and n_regionkey = r_regionkey
+and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
+ and o_orderdate < date '1995-01-01' + interval '1' year
+group by n_name
+order by revenue desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_custkey i_o_orderdate 4 NULL 211 Using where; Using temporary; Using filesort
+1 SIMPLE customer eq_ref PRIMARY,i_c_nationkey PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where
+1 SIMPLE nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.customer.c_nationkey 1
+1 SIMPLE supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.customer.c_nationkey 1 Using index
+1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue
+from customer, orders, lineitem, supplier, nation, region
+where c_custkey = o_custkey and l_orderkey = o_orderkey
+and l_suppkey = s_suppkey and c_nationkey = s_nationkey
+and s_nationkey = n_nationkey and n_regionkey = r_regionkey
+and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
+ and o_orderdate < date '1995-01-01' + interval '1' year
+group by n_name
+order by revenue desc;
+n_name revenue
+PERU 321915.8715
+ARGENTINA 69817.1451
+set optimizer_switch=@save_optimizer_switch;
+EXPLAIN select o_year,
+sum(case when nation = 'UNITED STATES' then volume else 0 end) /
+sum(volume) as mkt_share
+from (select extract(year from o_orderdate) as o_year,
+l_extendedprice * (1-l_discount) as volume,
+n2.n_name as nation
+from part, supplier, lineitem, orders, customer,
+nation n1, nation n2, region
+where p_partkey = l_partkey and s_suppkey = l_suppkey
+and l_orderkey = o_orderkey and o_custkey = c_custkey
+and c_nationkey = n1.n_nationkey and n1.n_regionkey = r_regionkey
+and r_name = 'AMERICA' and s_nationkey = n2.n_nationkey
+and o_orderdate between date '1995-01-01' and date '1996-12-31'
+ and p_type = 'STANDARD BRUSHED STEEL' ) as all_nations
+group by o_year
+order by o_year;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders ALL PRIMARY,i_o_orderdate,i_o_custkey NULL NULL NULL 1500 Using where; Using temporary; Using filesort
+1 SIMPLE customer eq_ref PRIMARY,i_c_nationkey PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where
+1 SIMPLE n1 eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.customer.c_nationkey 1
+1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+1 SIMPLE part eq_ref PRIMARY PRIMARY 4 dbt3_s001.lineitem.l_partkey 1 Using where
+1 SIMPLE supplier eq_ref PRIMARY,i_s_nationkey PRIMARY 4 dbt3_s001.lineitem.l_suppkey 1 Using where
+1 SIMPLE n2 eq_ref PRIMARY PRIMARY 4 dbt3_s001.supplier.s_nationkey 1
+select o_year,
+sum(case when nation = 'UNITED STATES' then volume else 0 end) /
+sum(volume) as mkt_share
+from (select extract(year from o_orderdate) as o_year,
+l_extendedprice * (1-l_discount) as volume,
+n2.n_name as nation
+from part, supplier, lineitem, orders, customer,
+nation n1, nation n2, region
+where p_partkey = l_partkey and s_suppkey = l_suppkey
+and l_orderkey = o_orderkey and o_custkey = c_custkey
+and c_nationkey = n1.n_nationkey and n1.n_regionkey = r_regionkey
+and r_name = 'AMERICA' and s_nationkey = n2.n_nationkey
+and o_orderdate between date '1995-01-01' and date '1996-12-31'
+ and p_type = 'STANDARD BRUSHED STEEL' ) as all_nations
+group by o_year
+order by o_year;
+o_year mkt_share
+1995 0.4495521838895718
+1996 0.024585468215352495
+EXPLAIN select nation, o_year, sum(amount) as sum_profit
+from (select n_name as nation,
+extract(year from o_orderdate) as o_year,
+l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount
+from part, supplier, lineitem, partsupp, orders, nation
+where s_suppkey = l_suppkey and ps_suppkey = l_suppkey
+and ps_partkey = l_partkey and p_partkey = l_partkey
+and o_orderkey = l_orderkey and s_nationkey = n_nationkey
+and p_name like '%green%') as profit
+group by nation, o_year
+order by nation, o_year desc;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE supplier index PRIMARY,i_s_nationkey i_s_nationkey 5 NULL 10 Using where; Using index; Using temporary; Using filesort
+1 SIMPLE nation eq_ref PRIMARY PRIMARY 4 dbt3_s001.supplier.s_nationkey 1
+1 SIMPLE partsupp ref PRIMARY,i_ps_partkey,i_ps_suppkey i_ps_suppkey 4 dbt3_s001.supplier.s_suppkey 70
+1 SIMPLE part eq_ref PRIMARY PRIMARY 4 dbt3_s001.partsupp.ps_partkey 1 Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity i_l_suppkey_partkey 10 dbt3_s001.partsupp.ps_partkey,dbt3_s001.supplier.s_suppkey 8
+1 SIMPLE orders eq_ref PRIMARY PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1
+select nation, o_year, sum(amount) as sum_profit
+from (select n_name as nation,
+extract(year from o_orderdate) as o_year,
+l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount
+from part, supplier, lineitem, partsupp, orders, nation
+where s_suppkey = l_suppkey and ps_suppkey = l_suppkey
+and ps_partkey = l_partkey and p_partkey = l_partkey
+and o_orderkey = l_orderkey and s_nationkey = n_nationkey
+and p_name like '%green%') as profit
+group by nation, o_year
+order by nation, o_year desc;
+nation o_year sum_profit
+ARGENTINA 1997 18247.873399999993
+ARGENTINA 1996 7731.089399999995
+ARGENTINA 1995 134490.5697
+ARGENTINA 1994 36767.101500000004
+ARGENTINA 1993 35857.08
+ARGENTINA 1992 35740
+ETHIOPIA 1998 2758.7801999999992
+ETHIOPIA 1997 19419.294599999997
+ETHIOPIA 1995 51231.87439999999
+ETHIOPIA 1994 3578.9478999999974
+ETHIOPIA 1992 1525.8234999999986
+IRAN 1998 37817.229600000006
+IRAN 1997 52643.77359999999
+IRAN 1996 70143.7761
+IRAN 1995 84094.58260000001
+IRAN 1994 18140.925599999995
+IRAN 1993 78655.1676
+IRAN 1992 87142.23960000002
+IRAQ 1998 22860.8082
+IRAQ 1997 93676.24359999999
+IRAQ 1996 45103.3242
+IRAQ 1994 36010.728599999995
+IRAQ 1993 33221.9399
+IRAQ 1992 47755.05900000001
+KENYA 1998 44194.831999999995
+KENYA 1997 57578.36259999999
+KENYA 1996 59195.90210000001
+KENYA 1995 79262.6278
+KENYA 1994 102360.66609999999
+KENYA 1993 128422.0196
+KENYA 1992 181517.2089
+MOROCCO 1998 41797.823199999984
+MOROCCO 1997 23685.801799999994
+MOROCCO 1996 62115.19579999998
+MOROCCO 1995 42442.64300000001
+MOROCCO 1994 48655.878000000004
+MOROCCO 1993 22926.744400000003
+MOROCCO 1992 32239.8088
+PERU 1998 86999.36459999997
+PERU 1997 121110.41070000001
+PERU 1996 177040.40759999995
+PERU 1995 122247.94520000002
+PERU 1994 88046.25329999998
+PERU 1993 49379.813799999996
+PERU 1992 80646.86050000001
+UNITED KINGDOM 1998 50577.25560000001
+UNITED KINGDOM 1997 114288.8605
+UNITED KINGDOM 1996 147684.46480000002
+UNITED KINGDOM 1995 225267.65759999998
+UNITED KINGDOM 1994 140595.5864
+UNITED KINGDOM 1993 322548.49210000003
+UNITED KINGDOM 1992 67747.88279999999
+UNITED STATES 1998 3957.0431999999996
+UNITED STATES 1997 94729.5704
+UNITED STATES 1996 79297.85670000002
+UNITED STATES 1995 62201.23360000001
+UNITED STATES 1994 43075.629899999985
+UNITED STATES 1993 27168.486199999996
+UNITED STATES 1992 34092.366
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='extended_keys=on';
+EXPLAIN select o_orderkey, p_partkey
+from part, lineitem, orders
+where p_retailprice > 1100 and o_orderdate='1997-01-01'
+and o_orderkey=l_orderkey and p_partkey=l_partkey;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE part range PRIMARY,i_p_retailprice i_p_retailprice 9 NULL 1 Using where; Using index
+1 SIMPLE orders ref PRIMARY,i_o_orderdate i_o_orderdate 4 const 1 Using index
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_orderkey,i_l_orderkey_quantity i_l_partkey 9 dbt3_s001.part.p_partkey,dbt3_s001.orders.o_orderkey 1 Using index
+select o_orderkey, p_partkey
+from part, lineitem, orders
+where p_retailprice > 1100 and o_orderdate='1997-01-01'
+and o_orderkey=l_orderkey and p_partkey=l_partkey;
+o_orderkey p_partkey
+5895 200
+set optimizer_switch=@save_optimizer_switch;
+DROP DATABASE dbt3_s001;
+use test;
+#
+# Bug mdev-473: ANALYZE table locked for write
+#
+set use_stat_tables='complementary';
+create table t1 (i int);
+lock table t1 write;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+alter table t1 add column a varchar(8);
+drop table t1;
+#
+# Bug mdev-487: memory leak in ANALYZE with stat tables
+#
+SET use_stat_tables = 'preferably';
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+DELETE FROM t1 WHERE a=1;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+DROP TABLE t1;
+#
+# Bug mdev-518: corrupted/missing statistical tables
+#
+CREATE TABLE t1 (i int) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1),(2);
+FLUSH TABLE t1;
+SET use_stat_tables='never';
+EXPLAIN SELECT * FROM t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2
+FLUSH TABLES;
+SET use_stat_tables='preferably';
+EXPLAIN SELECT * FROM t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2
+DROP TABLE t1;
+set use_stat_tables=@save_use_stat_tables;
+set optimizer_switch=@save_optimizer_switch_for_stat_tables_test;
+SET SESSION STORAGE_ENGINE=DEFAULT;
diff --git a/mysql-test/r/stat_tables_par.result b/mysql-test/r/stat_tables_par.result
new file mode 100644
index 00000000000..a98f934fa96
--- /dev/null
+++ b/mysql-test/r/stat_tables_par.result
@@ -0,0 +1,242 @@
+set @save_use_stat_tables=@@use_stat_tables;
+set use_stat_tables='preferably';
+DROP DATABASE IF EXISTS dbt3_s001;
+CREATE DATABASE dbt3_s001;
+use dbt3_s001;
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='extended_keys=off';
+select * from mysql.table_stats;
+db_name table_name cardinality
+dbt3_s001 customer 150
+dbt3_s001 lineitem 6005
+dbt3_s001 nation 25
+dbt3_s001 orders 1500
+dbt3_s001 part 200
+dbt3_s001 partsupp 700
+dbt3_s001 region 5
+dbt3_s001 supplier 10
+select * from mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 customer PRIMARY 1 1.0000
+dbt3_s001 customer i_c_nationkey 1 6.0000
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_shipdate 1 2.6500
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_receiptdate 1 2.6477
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 nation PRIMARY 1 1.0000
+dbt3_s001 nation i_n_regionkey 1 5.0000
+dbt3_s001 orders PRIMARY 1 1.0000
+dbt3_s001 orders i_o_orderdate 1 1.3321
+dbt3_s001 orders i_o_custkey 1 15.0000
+dbt3_s001 part PRIMARY 1 1.0000
+dbt3_s001 partsupp PRIMARY 1 3.5000
+dbt3_s001 partsupp PRIMARY 2 1.0000
+dbt3_s001 partsupp i_ps_partkey 1 3.5000
+dbt3_s001 partsupp i_ps_suppkey 1 70.0000
+dbt3_s001 region PRIMARY 1 1.0000
+dbt3_s001 supplier PRIMARY 1 1.0000
+dbt3_s001 supplier i_s_nationkey 1 1.1111
+flush table lineitem;
+set use_stat_tables='never';
+select sum(l_extendedprice*l_discount) as revenue
+from lineitem
+where l_shipdate >= date '1994-01-01'
+and l_shipdate < date '1994-01-01' + interval '1' year
+and l_discount between 0.06 - 0.01 and 0.06 + 0.01
+and l_quantity < 24;
+revenue
+77949.91860000002
+set debug_sync='statistics_mem_alloc_start1 WAIT_FOR second_thread_started_too';
+set debug_sync='statistics_mem_alloc_start2 SIGNAL first_thread_working';
+use dbt3_s001;
+set use_stat_tables='preferably';
+select sum(l_extendedprice*l_discount) as revenue
+from lineitem
+where l_shipdate >= date '1994-01-01'
+and l_shipdate < date '1994-01-01' + interval '1' year
+and l_discount between 0.06 - 0.01 and 0.06 + 0.01
+and l_quantity < 24 ;
+set debug_sync='statistics_mem_alloc_start1 SIGNAL second_thread_started_too';
+set debug_sync='statistics_mem_alloc_start2 WAIT_FOR first_thread_working';
+use dbt3_s001;
+set use_stat_tables='preferably';
+select sum(l_extendedprice*l_discount) as revenue
+from lineitem
+where l_shipdate >= date '1994-01-01'
+and l_shipdate < date '1994-01-01' + interval '1' year
+and l_discount between 0.06 - 0.01 and 0.06 + 0.01
+and l_quantity < 24;
+revenue
+77949.91860000002
+revenue
+77949.91860000002
+set use_stat_tables='preferably';
+set debug_sync='RESET';
+select * from mysql.index_stats where table_name='lineitem' order by index_name;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_receiptdate 1 2.6477
+dbt3_s001 lineitem i_l_shipdate 1 2.6500
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+delete from mysql.index_stats
+where table_name='lineitem' and
+index_name in ('i_l_shipdate', 'i_l_receiptdate');
+select * from mysql.index_stats where table_name='lineitem' order by index_name;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+analyze table lineitem persistent for columns() indexes (i_l_shipdate);
+select * from mysql.index_stats where table_name='lineitem' order by index_name;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_shipdate 1 2.6500
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+delete from mysql.index_stats
+where table_name='lineitem' and index_name= 'i_l_shipdate';
+select * from mysql.index_stats where table_name='lineitem' order by index_name;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+set debug_sync='statistics_collection_start1 WAIT_FOR second_thread_started_too';
+set debug_sync='statistics_collection_start2 SIGNAL first_thread_working';
+use dbt3_s001;
+set use_stat_tables='preferably';
+analyze table lineitem persistent for columns() indexes (i_l_shipdate);
+set debug_sync='statistics_collection_start1 SIGNAL second_thread_started_too';
+set debug_sync='statistics_collection_start2 WAIT_FOR first_thread_working';
+use dbt3_s001;
+set use_stat_tables='preferably';
+analyze table lineitem persistent for columns() indexes (i_l_receiptdate);
+set debug_sync='RESET';
+select * from mysql.index_stats where table_name='lineitem' order by index_name;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_receiptdate 1 2.6477
+dbt3_s001 lineitem i_l_shipdate 1 2.6500
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+select * from mysql.index_stats where table_name='lineitem'
+ order by index_name, prefix_arity;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_receiptdate 1 2.6477
+dbt3_s001 lineitem i_l_shipdate 1 2.6500
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+set debug_sync='RESET';
+set debug_sync='statistics_collection_start SIGNAL parked WAIT_FOR finish';
+use dbt3_s001;
+set use_stat_tables='preferably';
+analyze table lineitem persistent for all;
+set debug_sync='now WAIT_FOR parked';
+use dbt3_s001;
+set use_stat_tables='never';
+set debug_sync='now SIGNAL finish';
+set debug_sync='RESET';
+select * from mysql.index_stats where table_name='lineitem'
+ order by index_name, prefix_arity;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_receiptdate 1 2.6477
+dbt3_s001 lineitem i_l_shipdate 1 2.6500
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+set @save_global_use_stat_tables=@@global.use_stat_tables;
+set global use_stat_tables='preferably';
+set debug_sync='RESET';
+set debug_sync='statistics_update_start SIGNAL parker WAIT_FOR go1 EXECUTE 1';
+set debug_sync='thr_multi_lock_after_thr_lock SIGNAL go2 EXECUTE 2';
+use dbt3_s001;
+analyze table lineitem persistent for all;
+set debug_sync='open_and_process_table WAIT_FOR parker';
+set debug_sync='statistics_read_start SIGNAL go1 WAIT_FOR go2';
+use dbt3_s001;
+select * from mysql.index_stats, lineitem where index_name= 'i_l_shipdate' and l_orderkey=1 and l_partkey=68;
+db_name table_name index_name prefix_arity avg_frequency l_orderkey l_partkey l_suppkey l_linenumber l_quantity l_extendedprice l_discount l_tax l_returnflag l_linestatus l_shipDATE l_commitDATE l_receiptDATE l_shipinstruct l_shipmode l_comment
+dbt3_s001 lineitem i_l_shipdate 1 2.6500 1 68 9 2 36 34850.16 0.09 0.06 N O 1996-04-12 1996-02-28 1996-04-20 TAKE BACK RETURN MAIL slyly bold pinto beans detect s
+set debug_sync='RESET';
+set global use_stat_tables=@save_global_use_stat_tables;
+DROP DATABASE dbt3_s001;
+use test;
+set @save_global_use_stat_tables=@@global.use_stat_tables;
+set global use_stat_tables='preferably';
+set debug_sync='RESET';
+create table t1 (a int, b int, key(a));
+insert t1 values (1,1),(2,2);
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+SET debug_sync='after_open_table_ignore_flush WAIT_FOR go';
+select * from information_schema.statistics where table_schema='test';
+select * from t1;
+a b
+1 1
+2 2
+SET DEBUG_SYNC= "now SIGNAL go";
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME NON_UNIQUE INDEX_SCHEMA INDEX_NAME SEQ_IN_INDEX COLUMN_NAME COLLATION CARDINALITY SUB_PART PACKED NULLABLE INDEX_TYPE COMMENT INDEX_COMMENT
+def test t1 1 test a 1 a A 2 NULL NULL YES BTREE
+set debug_sync='RESET';
+drop table t1;
+set global use_stat_tables=@save_global_use_stat_tables;
+set use_stat_tables=@save_use_stat_tables;
diff --git a/mysql-test/r/stat_tables_par_innodb.result b/mysql-test/r/stat_tables_par_innodb.result
new file mode 100644
index 00000000000..e0c00c482a0
--- /dev/null
+++ b/mysql-test/r/stat_tables_par_innodb.result
@@ -0,0 +1,253 @@
+SET SESSION STORAGE_ENGINE='InnoDB';
+set @save_optimizer_switch_for_stat_tables_test=@@optimizer_switch;
+set optimizer_switch='extended_keys=on';
+set @save_use_stat_tables=@@use_stat_tables;
+set use_stat_tables='preferably';
+DROP DATABASE IF EXISTS dbt3_s001;
+CREATE DATABASE dbt3_s001;
+use dbt3_s001;
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='extended_keys=off';
+select * from mysql.table_stats;
+db_name table_name cardinality
+dbt3_s001 customer 150
+dbt3_s001 lineitem 6005
+dbt3_s001 nation 25
+dbt3_s001 orders 1500
+dbt3_s001 part 200
+dbt3_s001 partsupp 700
+dbt3_s001 region 5
+dbt3_s001 supplier 10
+select * from mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 customer PRIMARY 1 1.0000
+dbt3_s001 customer i_c_nationkey 1 6.0000
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_shipdate 1 2.6500
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_receiptdate 1 2.6477
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 nation PRIMARY 1 1.0000
+dbt3_s001 nation i_n_regionkey 1 5.0000
+dbt3_s001 orders PRIMARY 1 1.0000
+dbt3_s001 orders i_o_orderdate 1 1.3321
+dbt3_s001 orders i_o_custkey 1 15.0000
+dbt3_s001 part PRIMARY 1 1.0000
+dbt3_s001 partsupp PRIMARY 1 3.5000
+dbt3_s001 partsupp PRIMARY 2 1.0000
+dbt3_s001 partsupp i_ps_partkey 1 3.5000
+dbt3_s001 partsupp i_ps_suppkey 1 70.0000
+dbt3_s001 region PRIMARY 1 1.0000
+dbt3_s001 supplier PRIMARY 1 1.0000
+dbt3_s001 supplier i_s_nationkey 1 1.1111
+flush table lineitem;
+set use_stat_tables='never';
+select sum(l_extendedprice*l_discount) as revenue
+from lineitem
+where l_shipdate >= date '1994-01-01'
+and l_shipdate < date '1994-01-01' + interval '1' year
+and l_discount between 0.06 - 0.01 and 0.06 + 0.01
+and l_quantity < 24;
+revenue
+77949.91860000002
+set debug_sync='statistics_mem_alloc_start1 WAIT_FOR second_thread_started_too';
+set debug_sync='statistics_mem_alloc_start2 SIGNAL first_thread_working';
+use dbt3_s001;
+set use_stat_tables='preferably';
+select sum(l_extendedprice*l_discount) as revenue
+from lineitem
+where l_shipdate >= date '1994-01-01'
+and l_shipdate < date '1994-01-01' + interval '1' year
+and l_discount between 0.06 - 0.01 and 0.06 + 0.01
+and l_quantity < 24 ;
+set debug_sync='statistics_mem_alloc_start1 SIGNAL second_thread_started_too';
+set debug_sync='statistics_mem_alloc_start2 WAIT_FOR first_thread_working';
+use dbt3_s001;
+set use_stat_tables='preferably';
+select sum(l_extendedprice*l_discount) as revenue
+from lineitem
+where l_shipdate >= date '1994-01-01'
+and l_shipdate < date '1994-01-01' + interval '1' year
+and l_discount between 0.06 - 0.01 and 0.06 + 0.01
+and l_quantity < 24;
+revenue
+77949.91860000002
+revenue
+77949.91860000002
+set use_stat_tables='preferably';
+set debug_sync='RESET';
+select * from mysql.index_stats where table_name='lineitem' order by index_name;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_receiptdate 1 2.6477
+dbt3_s001 lineitem i_l_shipdate 1 2.6500
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+delete from mysql.index_stats
+where table_name='lineitem' and
+index_name in ('i_l_shipdate', 'i_l_receiptdate');
+select * from mysql.index_stats where table_name='lineitem' order by index_name;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+analyze table lineitem persistent for columns() indexes (i_l_shipdate);
+select * from mysql.index_stats where table_name='lineitem' order by index_name;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_shipdate 1 2.6500
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+delete from mysql.index_stats
+where table_name='lineitem' and index_name= 'i_l_shipdate';
+select * from mysql.index_stats where table_name='lineitem' order by index_name;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+set debug_sync='statistics_collection_start1 WAIT_FOR second_thread_started_too';
+set debug_sync='statistics_collection_start2 SIGNAL first_thread_working';
+use dbt3_s001;
+set use_stat_tables='preferably';
+analyze table lineitem persistent for columns() indexes (i_l_shipdate);
+set debug_sync='statistics_collection_start1 SIGNAL second_thread_started_too';
+set debug_sync='statistics_collection_start2 WAIT_FOR first_thread_working';
+use dbt3_s001;
+set use_stat_tables='preferably';
+analyze table lineitem persistent for columns() indexes (i_l_receiptdate);
+set debug_sync='RESET';
+select * from mysql.index_stats where table_name='lineitem' order by index_name;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_receiptdate 1 2.6477
+dbt3_s001 lineitem i_l_shipdate 1 2.6500
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+select * from mysql.index_stats where table_name='lineitem'
+ order by index_name, prefix_arity;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 lineitem PRIMARY 1 4.0033
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_commitdate 1 2.7160
+dbt3_s001 lineitem i_l_orderkey 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0033
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_partkey 1 30.0250
+dbt3_s001 lineitem i_l_receiptdate 1 2.6477
+dbt3_s001 lineitem i_l_shipdate 1 2.6500
+dbt3_s001 lineitem i_l_suppkey 1 600.5000
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0250
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5786
+set debug_sync='RESET';
+set debug_sync='statistics_collection_start SIGNAL parked WAIT_FOR finish';
+use dbt3_s001;
+set use_stat_tables='preferably';
+analyze table lineitem persistent for all;
+set debug_sync='now WAIT_FOR parked';
+use dbt3_s001;
+set use_stat_tables='never';
+select * from lineitem where l_orderkey=1 and l_partkey=156;
+l_orderkey l_partkey l_suppkey l_linenumber l_quantity l_extendedprice l_discount l_tax l_returnflag l_linestatus l_shipDATE l_commitDATE l_receiptDATE l_shipinstruct l_shipmode l_comment
+1 156 4 1 17 17954.55 0.04 0.02 N O 1996-03-13 1996-02-12 1996-03-22 DELIVER IN PERSON TRUCK blithely regular ideas caj
+delete from lineitem where l_orderkey=1 and l_partkey=156;
+select * from lineitem where l_orderkey=1 and l_partkey=156;
+l_orderkey l_partkey l_suppkey l_linenumber l_quantity l_extendedprice l_discount l_tax l_returnflag l_linestatus l_shipDATE l_commitDATE l_receiptDATE l_shipinstruct l_shipmode l_comment
+set debug_sync='now SIGNAL finish';
+set debug_sync='RESET';
+select * from mysql.index_stats where table_name='lineitem'
+ order by index_name, prefix_arity;
+db_name table_name index_name prefix_arity avg_frequency
+dbt3_s001 lineitem PRIMARY 1 4.0027
+dbt3_s001 lineitem PRIMARY 2 1.0000
+dbt3_s001 lineitem i_l_commitdate 1 2.7155
+dbt3_s001 lineitem i_l_orderkey 1 4.0027
+dbt3_s001 lineitem i_l_orderkey_quantity 1 4.0027
+dbt3_s001 lineitem i_l_orderkey_quantity 2 1.0404
+dbt3_s001 lineitem i_l_partkey 1 30.0200
+dbt3_s001 lineitem i_l_receiptdate 1 2.6473
+dbt3_s001 lineitem i_l_shipdate 1 2.6496
+dbt3_s001 lineitem i_l_suppkey 1 600.4000
+dbt3_s001 lineitem i_l_suppkey_partkey 1 30.0200
+dbt3_s001 lineitem i_l_suppkey_partkey 2 8.5771
+set @save_global_use_stat_tables=@@global.use_stat_tables;
+set global use_stat_tables='preferably';
+set debug_sync='RESET';
+set debug_sync='statistics_update_start SIGNAL parker WAIT_FOR go1 EXECUTE 1';
+set debug_sync='thr_multi_lock_after_thr_lock SIGNAL go2 EXECUTE 2';
+use dbt3_s001;
+analyze table lineitem persistent for all;
+set debug_sync='open_and_process_table WAIT_FOR parker';
+set debug_sync='statistics_read_start SIGNAL go1 WAIT_FOR go2';
+use dbt3_s001;
+select * from mysql.index_stats, lineitem where index_name= 'i_l_shipdate' and l_orderkey=1 and l_partkey=68;
+db_name table_name index_name prefix_arity avg_frequency l_orderkey l_partkey l_suppkey l_linenumber l_quantity l_extendedprice l_discount l_tax l_returnflag l_linestatus l_shipDATE l_commitDATE l_receiptDATE l_shipinstruct l_shipmode l_comment
+dbt3_s001 lineitem i_l_shipdate 1 2.6496 1 68 9 2 36 34850.16 0.09 0.06 N O 1996-04-12 1996-02-28 1996-04-20 TAKE BACK RETURN MAIL slyly bold pinto beans detect s
+set debug_sync='RESET';
+set global use_stat_tables=@save_global_use_stat_tables;
+DROP DATABASE dbt3_s001;
+use test;
+set @save_global_use_stat_tables=@@global.use_stat_tables;
+set global use_stat_tables='preferably';
+set debug_sync='RESET';
+create table t1 (a int, b int, key(a));
+insert t1 values (1,1),(2,2);
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+SET debug_sync='after_open_table_ignore_flush WAIT_FOR go';
+select * from information_schema.statistics where table_schema='test';
+select * from t1;
+a b
+1 1
+2 2
+SET DEBUG_SYNC= "now SIGNAL go";
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME NON_UNIQUE INDEX_SCHEMA INDEX_NAME SEQ_IN_INDEX COLUMN_NAME COLLATION CARDINALITY SUB_PART PACKED NULLABLE INDEX_TYPE COMMENT INDEX_COMMENT
+def test t1 1 test a 1 a A 2 NULL NULL YES BTREE
+set debug_sync='RESET';
+drop table t1;
+set global use_stat_tables=@save_global_use_stat_tables;
+set use_stat_tables=@save_use_stat_tables;
+set optimizer_switch=@save_optimizer_switch_for_stat_tables_test;
+SET SESSION STORAGE_ENGINE=DEFAULT;
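The result file above exercises DEBUG_SYNC points (statistics_collection_start*, statistics_update_start, statistics_read_start) to interleave two sessions inside the statistics collector. A minimal sketch of the pattern, assuming a second connection con1 and reusing the statistics_collection_start sync point with the arbitrary signal names parked/finish:

connection con1;
SET debug_sync='statistics_collection_start SIGNAL parked WAIT_FOR finish';
send ANALYZE TABLE lineitem PERSISTENT FOR ALL;
connection default;
SET debug_sync='now WAIT_FOR parked';      # con1 is parked inside the collector
DELETE FROM lineitem WHERE l_orderkey=1;   # concurrent DML while ANALYZE is held
SET debug_sync='now SIGNAL finish';        # let ANALYZE resume and finish
connection con1;
reap;
SET debug_sync='RESET';

The collected statistics then reflect the table as seen after the concurrent delete, which is exactly what the 4.0027 and 600.4000 avg_frequency rows above verify (6004 rows instead of 6005).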
diff --git a/mysql-test/r/stat_tables_partition.result b/mysql-test/r/stat_tables_partition.result
new file mode 100644
index 00000000000..75454b61fe0
--- /dev/null
+++ b/mysql-test/r/stat_tables_partition.result
@@ -0,0 +1,12 @@
+#
+# Bug mdev-3866: valgrind complaint from ANALYZE on a table with a BIT field
+#
+SET use_stat_tables = 'preferably';
+CREATE TABLE t1 (pk int PRIMARY KEY, a bit(1), INDEX idx(a)
+) ENGINE=MyISAM PARTITION BY KEY(pk) PARTITIONS 2;
+INSERT INTO t1 VALUES (1,1),(2,0),(3,0),(4,1);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+SET use_stat_tables = DEFAULT;
+DROP TABLE t1;
diff --git a/mysql-test/r/stat_tables_rbr.result b/mysql-test/r/stat_tables_rbr.result
new file mode 100644
index 00000000000..9e236f424bf
--- /dev/null
+++ b/mysql-test/r/stat_tables_rbr.result
@@ -0,0 +1,26 @@
+#
+# Bug mdev-463: assertion failure when running ANALYZE with RBR on
+#
+SET GLOBAL use_stat_tables = PREFERABLY;
+CREATE TABLE t1 (i INT) ENGINE=InnoDB;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+DROP TABLE t1;
+SET GLOBAL use_stat_tables = DEFAULT;
+SET use_stat_tables = PREFERABLY;
+CREATE TABLE t1 ( a INT ) ENGINE=MyISAM PARTITION BY HASH(a) PARTITIONS 2;
+ALTER TABLE t1 ANALYZE PARTITION p1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+SHOW BINLOG EVENTS;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 4 Format_desc 1 246 Server ver: #, Binlog ver: #
+master-bin.000001 246 Binlog_checkpoint 1 286 master-bin.000001
+master-bin.000001 286 Query 1 386 use `test`; CREATE TABLE t1 (i INT) ENGINE=InnoDB
+master-bin.000001 386 Query 1 465 use `test`; ANALYZE TABLE t1
+master-bin.000001 465 Query 1 569 use `test`; DROP TABLE `t1` /* generated by server */
+master-bin.000001 569 Query 1 705 use `test`; CREATE TABLE t1 ( a INT ) ENGINE=MyISAM PARTITION BY HASH(a) PARTITIONS 2
+master-bin.000001 705 Query 1 803 use `test`; ALTER TABLE t1 ANALYZE PARTITION p1
+SET use_stat_tables = DEFAULT;
+DROP TABLE t1;
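With use_stat_tables=PREFERABLY and row-based replication, ANALYZE TABLE and ALTER TABLE ... ANALYZE PARTITION are still written to the binary log as statements, which is what the event list above asserts. A hedged way to inspect a specific log rather than the current one, using only the standard SHOW BINLOG EVENTS options:

SHOW BINLOG EVENTS IN 'master-bin.000001' FROM 4 LIMIT 10;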
diff --git a/mysql-test/r/stat_tables_repl.result b/mysql-test/r/stat_tables_repl.result
new file mode 100644
index 00000000000..370b8c59db1
--- /dev/null
+++ b/mysql-test/r/stat_tables_repl.result
@@ -0,0 +1,41 @@
+include/master-slave.inc
+[connection master]
+#
+# Bug mdev-485: unexpected failure with replication of DROP/ALTER table
+# when RBR is on
+#
+CREATE TABLE t1 ( a int, b int ) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (2,20), (1,20), (3,30);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+DROP TABLE t1;
+CREATE TABLE t1 ( a int, b int, INDEX idx1(b) ) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (2,20), (1,20), (3,30);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+DROP INDEX idx1 ON t1;
+DROP TABLE t1;
+CREATE TABLE t1 ( a int, b int, INDEX idx1(b) ) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (2,20), (1,20), (3,30);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+ALTER TABLE t1 DROP COLUMN b;
+DROP TABLE t1;
+CREATE TABLE t1 ( a int, b int, INDEX idx1(b) ) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (2,20), (1,20), (3,30);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+ALTER TABLE t1 RENAME to s;
+DROP TABLE s;
+CREATE TABLE t1 ( a int, b int, INDEX idx1(b) ) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (2,20), (1,20), (3,30);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+ALTER TABLE t1 CHANGE COLUMN b c int ;
+DROP TABLE t1;
+include/rpl_end.inc
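Each scenario above runs ANALYZE on the master and then a DROP/ALTER that must replicate cleanly under RBR. A hedged follow-up check one could add per scenario, assuming the standard master-slave harness and that the statement-logged ANALYZE has been re-executed on the slave:

sync_slave_with_master;
SELECT * FROM mysql.table_stats WHERE table_name='t1';
connection master;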
diff --git a/mysql-test/r/statistics.result b/mysql-test/r/statistics.result
new file mode 100644
index 00000000000..ba0390f98db
--- /dev/null
+++ b/mysql-test/r/statistics.result
@@ -0,0 +1,1429 @@
+drop table if exists t1,t2;
+set @save_use_stat_tables=@@use_stat_tables;
+DELETE FROM mysql.table_stats;
+DELETE FROM mysql.column_stats;
+DELETE FROM mysql.index_stats;
+set use_stat_tables='preferably';
+CREATE TABLE t1 (
+a int NOT NULL PRIMARY KEY,
+b varchar(32),
+c char(16),
+d date,
+e double,
+f bit(3),
+INDEX idx1 (b, e),
+INDEX idx2 (c, d),
+INDEX idx3 (d),
+INDEX idx4 (e, b, d)
+) ENGINE= MYISAM;
+INSERT INTO t1 VALUES
+(0, NULL, NULL, NULL, NULL, NULL),
+(7, 'xxxxxxxxxxxxxxxxxxxxxxxxxx', 'dddddddd', '1990-05-15', 0.1, b'100'),
+(17, 'vvvvvvvvvvvvv', 'aaaa', '1989-03-12', 0.01, b'101'),
+(1, 'vvvvvvvvvvvvv', NULL, '1989-03-12', 0.01, b'100'),
+(12, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww', 'dddddddd', '1999-07-23', 0.112, b'001'),
+(23, 'vvvvvvvvvvvvv', 'dddddddd', '1999-07-23', 0.1, b'100'),
+(8, 'vvvvvvvvvvvvv', 'aaaa', '1999-07-23', 0.1, b'100'),
+(22, 'xxxxxxxxxxxxxxxxxxxxxxxxxx', 'aaaa', '1989-03-12', 0.112, b'001'),
+(31, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww', 'aaaa', '1999-07-23', 0.01, b'001'),
+(10, NULL, 'aaaa', NULL, 0.01, b'010'),
+(5, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww', 'dddddddd', '1999-07-23', 0.1, b'100'),
+(15, 'vvvvvvvvvvvvv', 'ccccccccc', '1990-05-15', 0.1, b'010'),
+(30, NULL, 'bbbbbb', NULL, NULL, b'100'),
+(38, 'zzzzzzzzzzzzzzzzzz', 'bbbbbb', NULL, NULL, NULL),
+(18, 'zzzzzzzzzzzzzzzzzz', 'ccccccccc', '1990-05-15', 0.01, b'010'),
+(9, 'yyy', 'bbbbbb', '1998-08-28', 0.01, NULL),
+(29, 'vvvvvvvvvvvvv', 'dddddddd', '1999-07-23', 0.012, b'010'),
+(3, 'yyy', 'dddddddd', '1990-05-15', 0.112, b'010'),
+(39, 'zzzzzzzzzzzzzzzzzz', 'bbbbbb', NULL, 0.01, b'100'),
+(14, 'xxxxxxxxxxxxxxxxxxxxxxxxxx', 'ccccccccc', '1990-05-15', 0.1, b'100'),
+(40, 'zzzzzzzzzzzzzzzzzz', 'bbbbbb', '1989-03-12', NULL, NULL),
+(44, NULL, 'aaaa', '1989-03-12', NULL, b'010'),
+(19, 'vvvvvvvvvvvvv', 'ccccccccc', '1990-05-15', 0.012, b'011'),
+(21, 'zzzzzzzzzzzzzzzzzz', 'dddddddd', '1989-03-12', 0.112, b'100'),
+(45, NULL, NULL, '1989-03-12', NULL, b'011'),
+(2, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww', 'ccccccccc', '1990-05-15', 0.1, b'001'),
+(35, 'yyy', 'aaaa', '1990-05-15', 0.05, b'011'),
+(4, 'vvvvvvvvvvvvv', 'dddddddd', '1999-07-23', 0.01, b'101'),
+(47, NULL, 'aaaa', '1990-05-15', 0.05, b'010'),
+(42, NULL, 'ccccccccc', '1989-03-12', 0.01, b'010'),
+(32, NULL, 'bbbbbb', '1990-05-15', 0.01, b'011'),
+(49, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww' , 'aaaa', '1990-05-15', NULL, NULL),
+(43, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww' , 'bbbbbb', '1990-05-15', NULL, b'100'),
+(37, 'yyy', NULL, '1989-03-12', 0.05, b'011'),
+(41, 'xxxxxxxxxxxxxxxxxxxxxxxxxx', 'ccccccccc', '1990-05-15', 0.05, NULL),
+(34, 'yyy', NULL, NULL, NULL, NULL),
+(33, 'zzzzzzzzzzzzzzzzzz', 'dddddddd', '1989-03-12', 0.05, b'011'),
+(24, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww', 'dddddddd', '1990-05-15', 0.01, b'101'),
+(11, 'yyy', 'ccccccccc', '1999-07-23', 0.1, NULL),
+(25, 'zzzzzzzzzzzzzzzzzz', 'bbb', '1989-03-12', 0.01, b'101');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+SELECT * FROM mysql.table_stats;
+db_name table_name cardinality
+test t1 40
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx1 1 6.4000
+test t1 idx1 2 1.6875
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t1 idx3 1 8.5000
+test t1 idx4 1 6.2000
+test t1 idx4 2 1.6875
+test t1 idx4 3 1.1304
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+40
+SELECT * FROM mysql.column_stats
+WHERE db_name='test' AND table_name='t1' AND column_name='a';
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+SELECT MIN(t1.a), MAX(t1.a),
+(SELECT COUNT(*) FROM t1 WHERE t1.a IS NULL) /
+(SELECT COUNT(*) FROM t1) AS "NULLS_RATIO(t1.a)",
+(SELECT COUNT(t1.a) FROM t1) /
+(SELECT COUNT(DISTINCT t1.a) FROM t1) AS "AVG_FREQUENCY(t1.a)"
+FROM t1;
+MIN(t1.a) MAX(t1.a) NULLS_RATIO(t1.a) AVG_FREQUENCY(t1.a)
+0 49 0.0000 1.0000
+SELECT * FROM mysql.column_stats
+WHERE db_name='test' AND table_name='t1' AND column_name='b';
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+SELECT MIN(t1.b), MAX(t1.b),
+(SELECT COUNT(*) FROM t1 WHERE t1.b IS NULL) /
+(SELECT COUNT(*) FROM t1) AS "NULLS_RATIO(t1.b)",
+(SELECT COUNT(t1.b) FROM t1) /
+(SELECT COUNT(DISTINCT t1.b) FROM t1) AS "AVG_FREQUENCY(t1.b)"
+FROM t1;
+MIN(t1.b) MAX(t1.b) NULLS_RATIO(t1.b) AVG_FREQUENCY(t1.b)
+vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 6.4000
+SELECT * FROM mysql.column_stats
+WHERE db_name='test' AND table_name='t1' AND column_name='c';
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+SELECT MIN(t1.c), MAX(t1.c),
+(SELECT COUNT(*) FROM t1 WHERE t1.c IS NULL) /
+(SELECT COUNT(*) FROM t1) AS "NULLS_RATIO(t1.c)",
+(SELECT COUNT(t1.c) FROM t1) /
+(SELECT COUNT(DISTINCT t1.c) FROM t1) AS "AVG_FREQUENCY(t1.c)"
+FROM t1;
+MIN(t1.c) MAX(t1.c) NULLS_RATIO(t1.c) AVG_FREQUENCY(t1.c)
+aaaa dddddddd 0.1250 7.0000
+SELECT * FROM mysql.column_stats
+WHERE db_name='test' AND table_name='t1' AND column_name='d';
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+SELECT MIN(t1.d), MAX(t1.d),
+(SELECT COUNT(*) FROM t1 WHERE t1.d IS NULL) /
+(SELECT COUNT(*) FROM t1) AS "NULLS_RATIO(t1.d)",
+(SELECT COUNT(t1.d) FROM t1) /
+(SELECT COUNT(DISTINCT t1.d) FROM t1) AS "AVG_FREQUENCY(t1.d)"
+FROM t1;
+MIN(t1.d) MAX(t1.d) NULLS_RATIO(t1.d) AVG_FREQUENCY(t1.d)
+1989-03-12 1999-07-23 0.1500 8.5000
+SELECT * FROM mysql.column_stats
+WHERE db_name='test' AND table_name='t1' AND column_name='e';
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+SELECT MIN(t1.e), MAX(t1.e),
+(SELECT COUNT(*) FROM t1 WHERE t1.e IS NULL) /
+(SELECT COUNT(*) FROM t1) AS "NULLS_RATIO(t1.e)",
+(SELECT COUNT(t1.e) FROM t1) /
+(SELECT COUNT(DISTINCT t1.e) FROM t1) AS "AVG_FREQUENCY(t1.e)"
+FROM t1;
+MIN(t1.e) MAX(t1.e) NULLS_RATIO(t1.e) AVG_FREQUENCY(t1.e)
+0.01 0.112 0.2250 6.2000
+SELECT * FROM mysql.index_stats
+WHERE db_name='test' AND table_name='t1' AND index_name='idx1';
+db_name table_name index_name prefix_arity avg_frequency
+test t1 idx1 1 6.4000
+test t1 idx1 2 1.6875
+SELECT
+(SELECT COUNT(*) FROM t1 WHERE t1.b IS NOT NULL) /
+(SELECT COUNT(DISTINCT t1.b) FROM t1 WHERE t1.b IS NOT NULL)
+AS 'ARITY 1',
+(SELECT COUNT(*) FROM t1 WHERE t1.b IS NOT NULL AND t1.e IS NOT NULL) /
+(SELECT COUNT(DISTINCT t1.b, t1.e) FROM t1
+WHERE t1.b IS NOT NULL AND t1.e IS NOT NULL)
+AS 'ARITY 2';
+ARITY 1 ARITY 2
+6.4000 1.6875
+SELECT * FROM mysql.index_stats
+WHERE db_name='test' AND table_name='t1' AND index_name='idx2';
+db_name table_name index_name prefix_arity avg_frequency
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+SELECT
+(SELECT COUNT(*) FROM t1 WHERE t1.c IS NOT NULL) /
+(SELECT COUNT(DISTINCT t1.c) FROM t1 WHERE t1.c IS NOT NULL)
+AS 'ARITY 1',
+(SELECT COUNT(*) FROM t1 WHERE t1.c IS NOT NULL AND t1.d IS NOT NULL) /
+(SELECT COUNT(DISTINCT t1.c, t1.d) FROM t1
+WHERE t1.c IS NOT NULL AND t1.d IS NOT NULL)
+AS 'ARITY 2';
+ARITY 1 ARITY 2
+7.0000 2.3846
+SELECT * FROM mysql.index_stats
+WHERE db_name='test' AND table_name='t1' AND index_name='idx3';
+db_name table_name index_name prefix_arity avg_frequency
+test t1 idx3 1 8.5000
+SELECT
+(SELECT COUNT(*) FROM t1 WHERE t1.d IS NOT NULL) /
+(SELECT COUNT(DISTINCT t1.d) FROM t1 WHERE t1.d IS NOT NULL)
+AS 'ARITY 1';
+ARITY 1
+8.5000
+SELECT * FROM mysql.index_stats
+WHERE db_name='test' AND table_name='t1' AND index_name='idx4';
+db_name table_name index_name prefix_arity avg_frequency
+test t1 idx4 1 6.2000
+test t1 idx4 2 1.6875
+test t1 idx4 3 1.1304
+SELECT
+(SELECT COUNT(*) FROM t1 WHERE t1.e IS NOT NULL) /
+(SELECT COUNT(DISTINCT t1.e) FROM t1 WHERE t1.e IS NOT NULL)
+AS 'ARITY 1',
+(SELECT COUNT(*) FROM t1 WHERE t1.e IS NOT NULL AND t1.b IS NOT NULL) /
+(SELECT COUNT(DISTINCT t1.e, t1.b) FROM t1
+WHERE t1.e IS NOT NULL AND t1.b IS NOT NULL)
+AS 'ARITY 2',
+(SELECT COUNT(*) FROM t1
+WHERE t1.e IS NOT NULL AND t1.b IS NOT NULL AND t1.d IS NOT NULL) /
+(SELECT COUNT(DISTINCT t1.e, t1.b, t1.d) FROM t1
+WHERE t1.e IS NOT NULL AND t1.b IS NOT NULL AND t1.d IS NOT NULL)
+AS 'ARITY 3';
+ARITY 1 ARITY 2 ARITY 3
+6.2000 1.6875 1.1304
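Each prefix's avg_frequency is simply (rows where all prefix columns are non-NULL) divided by (distinct prefix tuples among those rows). A worked instance from the output above: idx3(d) has 34 non-NULL dates over 4 distinct dates, hence 8.5000. Since COUNT(col) and COUNT(DISTINCT col) both ignore NULLs, the single-column cross-check collapses to one query:

SELECT COUNT(d) / COUNT(DISTINCT d) AS 'ARITY 1' FROM t1;  -- 34 / 4 = 8.5000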
+CREATE TABLE t3 (
+a int NOT NULL PRIMARY KEY,
+b varchar(32),
+c char(16),
+INDEX idx (c)
+) ENGINE=MYISAM;
+INSERT INTO t3 VALUES
+(0, NULL, NULL),
+(7, 'xxxxxxxxxxxxxxxxxxxxxxxxxx', 'dddddddd'),
+(17, 'vvvvvvvvvvvvv', 'aaaa'),
+(1, 'vvvvvvvvvvvvv', NULL),
+(12, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww', 'dddddddd'),
+(23, 'vvvvvvvvvvvvv', 'dddddddd'),
+(8, 'vvvvvvvvvvvvv', 'aaaa'),
+(22, 'xxxxxxxxxxxxxxxxxxxxxxxxxx', 'aaaa'),
+(31, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww', 'aaaa'),
+(10, NULL, 'aaaa'),
+(5, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww', 'dddddddd'),
+(15, 'vvvvvvvvvvvvv', 'ccccccccc'),
+(30, NULL, 'bbbbbb'),
+(38, 'zzzzzzzzzzzzzzzzzz', 'bbbbbb'),
+(18, 'zzzzzzzzzzzzzzzzzz', 'ccccccccc'),
+(9, 'yyy', 'bbbbbb'),
+(29, 'vvvvvvvvvvvvv', 'dddddddd');
+ANALYZE TABLE t3;
+Table Op Msg_type Msg_text
+test.t3 analyze status OK
+SELECT * FROM mysql.table_stats;
+db_name table_name cardinality
+test t1 40
+test t3 17
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+test t3 a 0 38 0.0000 4.0000 1.0000
+test t3 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.1765 18.0714 2.8000
+test t3 c aaaa dddddddd 0.1176 6.4000 3.7500
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx1 1 6.4000
+test t1 idx1 2 1.6875
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t1 idx3 1 8.5000
+test t1 idx4 1 6.2000
+test t1 idx4 2 1.6875
+test t1 idx4 3 1.1304
+test t3 PRIMARY 1 1.0000
+test t3 idx 1 3.7500
+ALTER TABLE t1 RENAME TO s1;
+SELECT * FROM mysql.table_stats;
+db_name table_name cardinality
+test s1 40
+test t3 17
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test s1 a 0 49 0.0000 4.0000 1.0000
+test s1 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+test s1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test s1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test s1 e 0.01 0.112 0.2250 8.0000 6.2000
+test s1 f 1 5 0.2000 1.0000 6.4000
+test t3 a 0 38 0.0000 4.0000 1.0000
+test t3 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.1765 18.0714 2.8000
+test t3 c aaaa dddddddd 0.1176 6.4000 3.7500
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test s1 PRIMARY 1 1.0000
+test s1 idx1 1 6.4000
+test s1 idx1 2 1.6875
+test s1 idx2 1 7.0000
+test s1 idx2 2 2.3846
+test s1 idx3 1 8.5000
+test s1 idx4 1 6.2000
+test s1 idx4 2 1.6875
+test s1 idx4 3 1.1304
+test t3 PRIMARY 1 1.0000
+test t3 idx 1 3.7500
+RENAME TABLE s1 TO t1;
+SELECT * FROM mysql.table_stats;
+db_name table_name cardinality
+test t1 40
+test t3 17
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+test t3 a 0 38 0.0000 4.0000 1.0000
+test t3 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.1765 18.0714 2.8000
+test t3 c aaaa dddddddd 0.1176 6.4000 3.7500
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx1 1 6.4000
+test t1 idx1 2 1.6875
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t1 idx3 1 8.5000
+test t1 idx4 1 6.2000
+test t1 idx4 2 1.6875
+test t1 idx4 3 1.1304
+test t3 PRIMARY 1 1.0000
+test t3 idx 1 3.7500
+DROP TABLE t3;
+SELECT * FROM mysql.table_stats;
+db_name table_name cardinality
+test t1 40
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx1 1 6.4000
+test t1 idx1 2 1.6875
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t1 idx3 1 8.5000
+test t1 idx4 1 6.2000
+test t1 idx4 2 1.6875
+test t1 idx4 3 1.1304
+CREATE TEMPORARY TABLE t0 (
+a int NOT NULL PRIMARY KEY,
+b varchar(32)
+);
+INSERT INTO t0 SELECT a,b FROM t1;
+ALTER TABLE t1 CHANGE COLUMN b x varchar(32),
+CHANGE COLUMN e y double;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `x` varchar(32) DEFAULT NULL,
+ `c` char(16) DEFAULT NULL,
+ `d` date DEFAULT NULL,
+ `y` double DEFAULT NULL,
+ `f` bit(3) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `idx1` (`x`,`y`),
+ KEY `idx2` (`c`,`d`),
+ KEY `idx3` (`d`),
+ KEY `idx4` (`y`,`x`,`d`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 x vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 y 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+ALTER TABLE t1 CHANGE COLUMN x b varchar(32),
+CHANGE COLUMN y e double;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` varchar(32) DEFAULT NULL,
+ `c` char(16) DEFAULT NULL,
+ `d` date DEFAULT NULL,
+ `e` double DEFAULT NULL,
+ `f` bit(3) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `idx1` (`b`,`e`),
+ KEY `idx2` (`c`,`d`),
+ KEY `idx3` (`d`),
+ KEY `idx4` (`e`,`b`,`d`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+ALTER TABLE t1 RENAME TO s1, CHANGE COLUMN b x varchar(32);
+SHOW CREATE TABLE s1;
+Table Create Table
+s1 CREATE TABLE `s1` (
+ `a` int(11) NOT NULL,
+ `x` varchar(32) DEFAULT NULL,
+ `c` char(16) DEFAULT NULL,
+ `d` date DEFAULT NULL,
+ `e` double DEFAULT NULL,
+ `f` bit(3) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `idx1` (`x`,`e`),
+ KEY `idx2` (`c`,`d`),
+ KEY `idx3` (`d`),
+ KEY `idx4` (`e`,`x`,`d`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM mysql.table_stats;
+db_name table_name cardinality
+test s1 40
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test s1 a 0 49 0.0000 4.0000 1.0000
+test s1 x vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+test s1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test s1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test s1 e 0.01 0.112 0.2250 8.0000 6.2000
+test s1 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test s1 PRIMARY 1 1.0000
+test s1 idx1 1 6.4000
+test s1 idx1 2 1.6875
+test s1 idx2 1 7.0000
+test s1 idx2 2 2.3846
+test s1 idx3 1 8.5000
+test s1 idx4 1 6.2000
+test s1 idx4 2 1.6875
+test s1 idx4 3 1.1304
+ALTER TABLE s1 RENAME TO t1, CHANGE COLUMN x b varchar(32);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` varchar(32) DEFAULT NULL,
+ `c` char(16) DEFAULT NULL,
+ `d` date DEFAULT NULL,
+ `e` double DEFAULT NULL,
+ `f` bit(3) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `idx1` (`b`,`e`),
+ KEY `idx2` (`c`,`d`),
+ KEY `idx3` (`d`),
+ KEY `idx4` (`e`,`b`,`d`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM mysql.table_stats;
+db_name table_name cardinality
+test t1 40
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx1 1 6.4000
+test t1 idx1 2 1.6875
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t1 idx3 1 8.5000
+test t1 idx4 1 6.2000
+test t1 idx4 2 1.6875
+test t1 idx4 3 1.1304
+ALTER TABLE t1 CHANGE COLUMN b x varchar(30);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `x` varchar(30) DEFAULT NULL,
+ `c` char(16) DEFAULT NULL,
+ `d` date DEFAULT NULL,
+ `e` double DEFAULT NULL,
+ `f` bit(3) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `idx1` (`x`,`e`),
+ KEY `idx2` (`c`,`d`),
+ KEY `idx3` (`d`),
+ KEY `idx4` (`e`,`x`,`d`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t1 idx3 1 8.5000
+ALTER TABLE t1 CHANGE COLUMN x b varchar(32);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` varchar(32) DEFAULT NULL,
+ `c` char(16) DEFAULT NULL,
+ `d` date DEFAULT NULL,
+ `e` double DEFAULT NULL,
+ `f` bit(3) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `idx1` (`b`,`e`),
+ KEY `idx2` (`c`,`d`),
+ KEY `idx3` (`d`),
+ KEY `idx4` (`e`,`b`,`d`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t1 idx3 1 8.5000
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS(b) INDEXES(idx1, idx4);
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx4 3 1.1304
+test t1 idx4 2 1.6875
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t1 idx3 1 8.5000
+test t1 idx4 1 6.2000
+test t1 idx1 2 1.6875
+test t1 idx1 1 6.4000
+SELECT * INTO OUTFILE 'MYSQLTEST_VARDIR/tmp/save_column_stats'
+ FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' LINES TERMINATED BY '\n'
+ FROM mysql.column_stats WHERE column_name='b';
+SELECT * INTO OUTFILE 'MYSQLTEST_VARDIR/tmp/save_index_stats'
+ FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' LINES TERMINATED BY '\n'
+ FROM mysql.index_stats WHERE index_name IN ('idx1', 'idx4');
+ALTER TABLE t1 CHANGE COLUMN b x varchar(30);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `x` varchar(30) DEFAULT NULL,
+ `c` char(16) DEFAULT NULL,
+ `d` date DEFAULT NULL,
+ `e` double DEFAULT NULL,
+ `f` bit(3) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `idx1` (`x`,`e`),
+ KEY `idx2` (`c`,`d`),
+ KEY `idx3` (`d`),
+ KEY `idx4` (`e`,`x`,`d`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t1 idx3 1 8.5000
+ALTER TABLE t1 CHANGE COLUMN x b varchar(32);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` varchar(32) DEFAULT NULL,
+ `c` char(16) DEFAULT NULL,
+ `d` date DEFAULT NULL,
+ `e` double DEFAULT NULL,
+ `f` bit(3) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `idx1` (`b`,`e`),
+ KEY `idx2` (`c`,`d`),
+ KEY `idx3` (`d`),
+ KEY `idx4` (`e`,`b`,`d`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t1 idx3 1 8.5000
+LOAD DATA INFILE 'MYSQLTEST_VARDIR/tmp/save_column_stats'
+ INTO TABLE mysql.column_stats
+FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' LINES TERMINATED BY '\n';
+LOAD DATA INFILE 'MYSQLTEST_VARDIR/tmp/save_index_stats'
+ INTO TABLE mysql.index_stats
+FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' LINES TERMINATED BY '\n';
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx4 3 1.1304
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t1 idx3 1 8.5000
+test t1 idx4 2 1.6875
+test t1 idx4 1 6.2000
+test t1 idx1 2 1.6875
+test t1 idx1 1 6.4000
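Dropping or retyping a column (or index) deletes its rows from the statistics tables, so the test saves them with SELECT ... INTO OUTFILE first and replays them with LOAD DATA INFILE afterwards, as just shown. A generic sketch of that pattern, with /tmp/b_stats as a placeholder path:

SELECT * INTO OUTFILE '/tmp/b_stats'
FROM mysql.column_stats
WHERE db_name='test' AND table_name='t1' AND column_name='b';
-- ... DDL that would discard the rows, e.g. CHANGE COLUMN b x ... and back ...
LOAD DATA INFILE '/tmp/b_stats' INTO TABLE mysql.column_stats;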
+ALTER TABLE t1 DROP COLUMN b;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `c` char(16) DEFAULT NULL,
+ `d` date DEFAULT NULL,
+ `e` double DEFAULT NULL,
+ `f` bit(3) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `idx1` (`e`),
+ KEY `idx2` (`c`,`d`),
+ KEY `idx3` (`d`),
+ KEY `idx4` (`e`,`d`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t1 idx3 1 8.5000
+DROP INDEX idx2 ON t1;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `c` char(16) DEFAULT NULL,
+ `d` date DEFAULT NULL,
+ `e` double DEFAULT NULL,
+ `f` bit(3) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `idx1` (`e`),
+ KEY `idx3` (`d`),
+ KEY `idx4` (`e`,`d`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx3 1 8.5000
+DROP INDEX idx1 ON t1;
+DROP INDEX idx4 ON t1;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `c` char(16) DEFAULT NULL,
+ `d` date DEFAULT NULL,
+ `e` double DEFAULT NULL,
+ `f` bit(3) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `idx3` (`d`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+ALTER TABLE t1 ADD COLUMN b varchar(32);
+CREATE INDEX idx1 ON t1(b, e);
+CREATE INDEX idx2 ON t1(c, d);
+CREATE INDEX idx4 ON t1(e, b, d);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `c` char(16) DEFAULT NULL,
+ `d` date DEFAULT NULL,
+ `e` double DEFAULT NULL,
+ `f` bit(3) DEFAULT NULL,
+ `b` varchar(32) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `idx3` (`d`),
+ KEY `idx1` (`b`,`e`),
+ KEY `idx2` (`c`,`d`),
+ KEY `idx4` (`e`,`b`,`d`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx3 1 8.5000
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS(b) INDEXES(idx1, idx2, idx4);
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 b NULL NULL 1.0000 NULL NULL
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx2 1 7.0000
+test t1 idx1 2 NULL
+test t1 idx1 1 NULL
+test t1 idx3 1 8.5000
+test t1 idx2 2 2.3846
+test t1 idx4 1 6.2000
+test t1 idx4 2 NULL
+test t1 idx4 3 NULL
+UPDATE t1 SET b=(SELECT b FROM t0 WHERE t0.a= t1.a);
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS(b) INDEXES(idx1, idx2, idx4);
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx2 1 7.0000
+test t1 idx1 2 1.6875
+test t1 idx1 1 6.4000
+test t1 idx3 1 8.5000
+test t1 idx2 2 2.3846
+test t1 idx4 1 6.2000
+test t1 idx4 2 1.6875
+test t1 idx4 3 1.1304
+ALTER TABLE t1 DROP COLUMN b,
+DROP INDEX idx1, DROP INDEX idx2, DROP INDEX idx4;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `c` char(16) DEFAULT NULL,
+ `d` date DEFAULT NULL,
+ `e` double DEFAULT NULL,
+ `f` bit(3) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `idx3` (`d`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx3 1 8.5000
+ALTER TABLE t1 ADD COLUMN b varchar(32);
+ALTER TABLE t1
+ADD INDEX idx1 (b, e), ADD INDEX idx2 (c, d), ADD INDEX idx4 (e, b, d);
+UPDATE t1 SET b=(SELECT b FROM t0 WHERE t0.a= t1.a);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `c` char(16) DEFAULT NULL,
+ `d` date DEFAULT NULL,
+ `e` double DEFAULT NULL,
+ `f` bit(3) DEFAULT NULL,
+ `b` varchar(32) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `idx3` (`d`),
+ KEY `idx1` (`b`,`e`),
+ KEY `idx2` (`c`,`d`),
+ KEY `idx4` (`e`,`b`,`d`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx3 1 8.5000
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS(b) INDEXES(idx1, idx2, idx4);
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx4 1 6.2000
+test t1 idx4 2 1.6875
+test t1 idx3 1 8.5000
+test t1 idx2 2 2.3846
+test t1 idx2 1 7.0000
+test t1 idx1 2 1.6875
+test t1 idx1 1 6.4000
+test t1 idx4 3 1.1304
+DELETE FROM mysql.table_stats;
+DELETE FROM mysql.column_stats;
+DELETE FROM mysql.index_stats;
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS() INDEXES();
+Table Op Msg_type Msg_text
+test.t1 analyze status Table is already up to date
+SELECT * FROM mysql.table_stats;
+db_name table_name cardinality
+test t1 40
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS(c,e,b) INDEXES(idx2,idx4);
+Table Op Msg_type Msg_text
+test.t1 analyze status Table is already up to date
+SELECT * FROM mysql.table_stats;
+db_name table_name cardinality
+test t1 40
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t1 idx4 1 6.2000
+test t1 idx4 2 1.6875
+test t1 idx4 3 1.1304
+DELETE FROM mysql.index_stats WHERE table_name='t1' AND index_name='primary';
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t1 idx4 1 6.2000
+test t1 idx4 2 1.6875
+test t1 idx4 3 1.1304
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS() INDEXES(primary);
+Table Op Msg_type Msg_text
+test.t1 analyze status Table is already up to date
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t1 idx4 1 6.2000
+test t1 idx4 2 1.6875
+test t1 idx4 3 1.1304
+test t1 PRIMARY 1 1.0000
+DELETE FROM mysql.table_stats;
+DELETE FROM mysql.column_stats;
+DELETE FROM mysql.index_stats;
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS ALL INDEXES ALL;
+Table Op Msg_type Msg_text
+test.t1 analyze status Table is already up to date
+SELECT * FROM mysql.table_stats;
+db_name table_name cardinality
+test t1 40
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+test t1 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx3 1 8.5000
+test t1 idx1 1 6.4000
+test t1 idx1 2 1.6875
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t1 idx4 1 6.2000
+test t1 idx4 2 1.6875
+test t1 idx4 3 1.1304
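Everything above exercises the PERSISTENT FOR clause of ANALYZE TABLE; the forms used are:

ANALYZE TABLE t1 PERSISTENT FOR ALL;                                -- all columns and indexes
ANALYZE TABLE t1 PERSISTENT FOR COLUMNS ALL INDEXES ALL;            -- the same, spelled out
ANALYZE TABLE t1 PERSISTENT FOR COLUMNS(c,e,b) INDEXES(idx2,idx4);  -- explicit subsets
ANALYZE TABLE t1 PERSISTENT FOR COLUMNS() INDEXES();                -- cardinality only

An empty list still refreshes the mysql.table_stats cardinality row without touching column or index statistics, as the COLUMNS() INDEXES() run above shows.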
+CREATE TABLE t2 LIKE t1;
+ALTER TABLE t2 ENGINE=InnoDB;
+INSERT INTO t2 SELECT * FROM t1;
+set optimizer_switch='extended_keys=off';
+ANALYZE TABLE t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status OK
+SELECT * FROM mysql.table_stats;
+db_name table_name cardinality
+test t1 40
+test t2 40
+SELECT * FROM mysql.column_stats ORDER BY column_name;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t2 a 0 49 0.0000 4.0000 1.0000
+test t1 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+test t2 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t2 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t2 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t2 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+test t2 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t2 PRIMARY 1 1.0000
+test t1 idx1 1 6.4000
+test t2 idx1 1 6.4000
+test t1 idx1 2 1.6875
+test t2 idx1 2 1.6875
+test t1 idx2 1 7.0000
+test t2 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t2 idx2 2 2.3846
+test t1 idx3 1 8.5000
+test t2 idx3 1 8.5000
+test t1 idx4 1 6.2000
+test t2 idx4 1 6.2000
+test t1 idx4 2 1.6875
+test t2 idx4 2 1.6875
+test t1 idx4 3 1.1304
+test t2 idx4 3 1.1304
+DELETE FROM mysql.table_stats;
+DELETE FROM mysql.column_stats;
+DELETE FROM mysql.index_stats;
+set optimizer_switch='extended_keys=on';
+ANALYZE TABLE t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status OK
+SELECT * FROM mysql.table_stats;
+db_name table_name cardinality
+test t2 40
+SELECT * FROM mysql.column_stats ORDER BY column_name;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t2 a 0 49 0.0000 4.0000 1.0000
+test t2 b vvvvvvvvvvvvv zzzzzzzzzzzzzzzzzz 0.2000 17.1250 6.4000
+test t2 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t2 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t2 e 0.01 0.112 0.2250 8.0000 6.2000
+test t2 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+db_name table_name index_name prefix_arity avg_frequency
+test t2 PRIMARY 1 1.0000
+test t2 idx1 1 6.4000
+test t2 idx1 2 1.6875
+test t2 idx1 3 1.0000
+test t2 idx2 1 7.0000
+test t2 idx2 2 2.3846
+test t2 idx2 3 1.0000
+test t2 idx3 1 8.5000
+test t2 idx3 2 1.0000
+test t2 idx4 1 6.2000
+test t2 idx4 2 1.6875
+test t2 idx4 3 1.1304
+test t2 idx4 4 1.0000
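With extended_keys=on, each InnoDB secondary index is treated as if suffixed with the primary-key columns, so idx1(b,e) gains a third prefix (b,e,a), idx3(d) a second (d,a), and so on; the trailing prefix is unique by construction, hence its avg_frequency of 1.0000. The extra prefix can be cross-checked the same way as the others, assuming t2 still has its original primary key on a:

SELECT COUNT(*) / COUNT(DISTINCT b, e, a) AS 'ARITY 3'
FROM t2 WHERE b IS NOT NULL AND e IS NOT NULL;  -- 1.0000: a alone is already unique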
+ALTER TABLE t2 DROP PRIMARY KEY, DROP INDEX idx1;
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+db_name table_name index_name prefix_arity avg_frequency
+test t2 idx2 1 7.0000
+test t2 idx2 2 2.3846
+test t2 idx3 1 8.5000
+test t2 idx4 1 6.2000
+test t2 idx4 2 1.6875
+test t2 idx4 3 1.1304
+UPDATE t2 SET b=0 WHERE b IS NULL;
+ALTER TABLE t2 ADD PRIMARY KEY (a,b);
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+db_name table_name index_name prefix_arity avg_frequency
+test t2 idx2 1 7.0000
+test t2 idx2 2 2.3846
+test t2 idx3 1 8.5000
+test t2 idx4 1 6.2000
+test t2 idx4 2 1.6875
+test t2 idx4 3 1.1304
+ANALYZE TABLE t2 PERSISTENT FOR COLUMNS() INDEXES ALL;
+Table Op Msg_type Msg_text
+test.t2 analyze status OK
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+db_name table_name index_name prefix_arity avg_frequency
+test t2 PRIMARY 1 1.0000
+test t2 PRIMARY 2 1.0000
+test t2 idx2 1 7.0000
+test t2 idx2 2 2.3846
+test t2 idx2 3 1.0000
+test t2 idx2 4 1.0000
+test t2 idx3 1 8.5000
+test t2 idx3 2 1.0000
+test t2 idx3 3 1.0000
+test t2 idx4 1 6.2000
+test t2 idx4 2 1.7222
+test t2 idx4 3 1.1154
+test t2 idx4 4 1.0000
+ALTER TABLE t2 CHANGE COLUMN b b varchar(30);
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+db_name table_name index_name prefix_arity avg_frequency
+test t2 idx2 1 7.0000
+test t2 idx2 2 2.3846
+test t2 idx3 1 8.5000
+ANALYZE TABLE t2 PERSISTENT FOR COLUMNS ALL INDEXES ALL;
+Table Op Msg_type Msg_text
+test.t2 analyze status OK
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+db_name table_name index_name prefix_arity avg_frequency
+test t2 PRIMARY 1 1.0000
+test t2 PRIMARY 2 1.0000
+test t2 idx2 1 7.0000
+test t2 idx2 2 2.3846
+test t2 idx2 3 1.0000
+test t2 idx2 4 1.0000
+test t2 idx3 1 8.5000
+test t2 idx3 2 1.0000
+test t2 idx3 3 1.0000
+test t2 idx4 1 6.2000
+test t2 idx4 2 1.7222
+test t2 idx4 3 1.1154
+test t2 idx4 4 1.0000
+ALTER TABLE t2 CHANGE COLUMN b b varchar(32);
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+db_name table_name index_name prefix_arity avg_frequency
+test t2 idx2 1 7.0000
+test t2 idx2 2 2.3846
+test t2 idx3 1 8.5000
+ANALYZE TABLE t2 PERSISTENT FOR COLUMNS ALL INDEXES ALL;
+Table Op Msg_type Msg_text
+test.t2 analyze status OK
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+db_name table_name index_name prefix_arity avg_frequency
+test t2 PRIMARY 1 1.0000
+test t2 PRIMARY 2 1.0000
+test t2 idx2 1 7.0000
+test t2 idx2 2 2.3846
+test t2 idx2 3 1.0000
+test t2 idx2 4 1.0000
+test t2 idx3 1 8.5000
+test t2 idx3 2 1.0000
+test t2 idx3 3 1.0000
+test t2 idx4 1 6.2000
+test t2 idx4 2 1.7222
+test t2 idx4 3 1.1154
+test t2 idx4 4 1.0000
+ALTER TABLE t2 DROP COLUMN b;
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+db_name table_name index_name prefix_arity avg_frequency
+test t2 idx2 1 7.0000
+test t2 idx2 2 2.3846
+test t2 idx3 1 8.5000
+ANALYZE TABLE t2 PERSISTENT FOR COLUMNS() INDEXES ALL;
+Table Op Msg_type Msg_text
+test.t2 analyze status OK
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+db_name table_name index_name prefix_arity avg_frequency
+test t2 PRIMARY 1 1.0000
+test t2 idx2 1 7.0000
+test t2 idx2 2 2.3846
+test t2 idx2 3 1.0000
+test t2 idx3 1 8.5000
+test t2 idx3 2 1.0000
+test t2 idx4 1 6.2000
+test t2 idx4 2 2.2308
+test t2 idx4 3 1.0000
+set optimizer_switch='extended_keys=off';
+ALTER TABLE t1
+DROP INDEX idx1,
+DROP INDEX idx4;
+ALTER TABLE t1
+MODIFY COLUMN b text,
+ADD INDEX idx1 (b(4), e),
+ADD INDEX idx4 (e, b(4), d);
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t2 a 0 49 0.0000 4.0000 1.0000
+test t2 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t2 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t2 e 0.01 0.112 0.2250 8.0000 6.2000
+test t2 f 1 5 0.2000 1.0000 6.4000
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t2 idx3 1 8.5000
+test t2 idx3 2 1.0000
+test t2 idx2 3 1.0000
+test t2 idx2 1 7.0000
+test t2 idx2 2 2.3846
+test t2 idx4 3 1.0000
+test t2 idx4 1 6.2000
+test t2 idx4 2 2.2308
+test t2 PRIMARY 1 1.0000
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t2 a 0 49 0.0000 4.0000 1.0000
+test t2 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t2 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t2 e 0.01 0.112 0.2250 8.0000 6.2000
+test t2 f 1 5 0.2000 1.0000 6.4000
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+test t1 b NULL NULL 0.2000 17.1250 NULL
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t2 idx3 1 8.5000
+test t2 idx3 2 1.0000
+test t2 idx2 3 1.0000
+test t2 idx2 1 7.0000
+test t2 idx2 2 2.3846
+test t1 idx2 1 7.0000
+test t1 idx3 1 8.5000
+test t1 PRIMARY 1 1.0000
+test t2 idx4 3 1.0000
+test t2 idx4 1 6.2000
+test t2 idx4 2 2.2308
+test t2 PRIMARY 1 1.0000
+test t1 idx2 2 2.3846
+test t1 idx1 1 NULL
+test t1 idx1 2 NULL
+test t1 idx4 1 6.2000
+test t1 idx4 2 NULL
+test t1 idx4 3 NULL
+DELETE FROM mysql.table_stats;
+DELETE FROM mysql.column_stats;
+DELETE FROM mysql.index_stats;
+ANALYZE TABLE mysql.column_stats PERSISTENT FOR ALL;
+Table Op Msg_type Msg_text
+mysql.column_stats analyze error Invalid argument
+ANALYZE TABLE mysql.column_stats;
+Table Op Msg_type Msg_text
+mysql.column_stats analyze status OK
+SELECT * FROM mysql.table_stats;
+db_name table_name cardinality
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+set use_stat_tables='never';
+ANALYZE TABLE t1 PERSISTENT FOR ALL;
+Table Op Msg_type Msg_text
+test.t1 analyze status Table is already up to date
+SELECT * FROM mysql.table_stats;
+db_name table_name cardinality
+test t1 40
+SELECT * FROM mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency
+test t1 a 0 49 0.0000 4.0000 1.0000
+test t1 c aaaa dddddddd 0.1250 6.6571 7.0000
+test t1 d 1989-03-12 1999-07-23 0.1500 3.0000 8.5000
+test t1 e 0.01 0.112 0.2250 8.0000 6.2000
+test t1 f 1 5 0.2000 1.0000 6.4000
+test t1 b NULL NULL 0.2000 17.1250 NULL
+SELECT * FROM mysql.index_stats;
+db_name table_name index_name prefix_arity avg_frequency
+test t1 PRIMARY 1 1.0000
+test t1 idx3 1 8.5000
+test t1 idx2 1 7.0000
+test t1 idx2 2 2.3846
+test t1 idx1 1 NULL
+test t1 idx1 2 NULL
+test t1 idx4 1 6.2000
+test t1 idx4 2 NULL
+test t1 idx4 3 NULL
+DELETE FROM mysql.table_stats;
+DELETE FROM mysql.column_stats;
+DELETE FROM mysql.index_stats;
+DROP TABLE t1,t2;
+set names utf8;
+CREATE DATABASE world;
+use world;
+CREATE TABLE Country (
+Code char(3) NOT NULL default '',
+Name char(52) NOT NULL default '',
+SurfaceArea float(10,2) NOT NULL default '0.00',
+Population int(11) NOT NULL default '0',
+Capital int(11) default NULL,
+PRIMARY KEY (Code),
+UNIQUE INDEX (Name)
+) CHARACTER SET utf8 COLLATE utf8_bin;
+CREATE TABLE City (
+ID int(11) NOT NULL auto_increment,
+Name char(35) NOT NULL default '',
+Country char(3) NOT NULL default '',
+Population int(11) NOT NULL default '0',
+PRIMARY KEY (ID),
+INDEX (Population),
+INDEX (Country)
+) CHARACTER SET utf8 COLLATE utf8_bin;
+CREATE TABLE CountryLanguage (
+Country char(3) NOT NULL default '',
+Language char(30) NOT NULL default '',
+Percentage float(3,1) NOT NULL default '0.0',
+PRIMARY KEY (Country, Language),
+INDEX (Percentage)
+) CHARACTER SET utf8 COLLATE utf8_bin;
+set use_stat_tables='preferably';
+ANALYZE TABLE Country, City, CountryLanguage;
+SELECT UPPER(db_name), UPPER(table_name), cardinality
+FROM mysql.table_stats;
+UPPER(db_name) UPPER(table_name) cardinality
+WORLD COUNTRY 239
+WORLD CITY 4079
+WORLD COUNTRYLANGUAGE 984
+SELECT UPPER(db_name), UPPER(table_name),
+column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency
+FROM mysql.column_stats;
+UPPER(db_name) UPPER(table_name) column_name min_value max_value nulls_ratio avg_length avg_frequency
+WORLD COUNTRY Code ABW ZWE 0.0000 3.0000 1.0000
+WORLD COUNTRY Name Afghanistan Zimbabwe 0.0000 10.1088 1.0000
+WORLD COUNTRY SurfaceArea 0.40 17075400.00 0.0000 4.0000 1.0042
+WORLD COUNTRY Population 0 1277558000 0.0000 4.0000 1.0575
+WORLD COUNTRY Capital 1 4074 0.0293 4.0000 1.0000
+WORLD CITY ID 1 4079 0.0000 4.0000 1.0000
+WORLD CITY Name A Coruña (La Coruña) Ürgenc 0.0000 8.6416 1.0195
+WORLD CITY Country ABW ZWE 0.0000 3.0000 17.5819
+WORLD CITY Population 42 10500000 0.0000 4.0000 1.0467
+WORLD COUNTRYLANGUAGE Country ABW ZWE 0.0000 3.0000 4.2232
+WORLD COUNTRYLANGUAGE Language Abhyasi [South]Mande 0.0000 7.1778 2.1532
+WORLD COUNTRYLANGUAGE Percentage 0.0 99.9 0.0000 4.0000 2.7640
+SELECT UPPER(db_name), UPPER(table_name),
+index_name, prefix_arity, avg_frequency
+FROM mysql.index_stats;
+UPPER(db_name) UPPER(table_name) index_name prefix_arity avg_frequency
+WORLD COUNTRY PRIMARY 1 1.0000
+WORLD COUNTRY Name 1 1.0000
+WORLD CITY PRIMARY 1 1.0000
+WORLD CITY Population 1 1.0467
+WORLD CITY Country 1 17.5819
+WORLD COUNTRYLANGUAGE PRIMARY 1 4.2232
+WORLD COUNTRYLANGUAGE PRIMARY 2 1.0000
+WORLD COUNTRYLANGUAGE Percentage 1 2.7640
+use test;
+set use_stat_tables='never';
+CREATE DATABASE world_innodb;
+use world_innodb;
+CREATE TABLE Country (
+Code char(3) NOT NULL default '',
+Name char(52) NOT NULL default '',
+SurfaceArea float(10,2) NOT NULL default '0.00',
+Population int(11) NOT NULL default '0',
+Capital int(11) default NULL,
+PRIMARY KEY (Code),
+UNIQUE INDEX (Name)
+) CHARACTER SET utf8 COLLATE utf8_bin;
+CREATE TABLE City (
+ID int(11) NOT NULL auto_increment,
+Name char(35) NOT NULL default '',
+Country char(3) NOT NULL default '',
+Population int(11) NOT NULL default '0',
+PRIMARY KEY (ID),
+INDEX (Population),
+INDEX (Country)
+) CHARACTER SET utf8 COLLATE utf8_bin;
+CREATE TABLE CountryLanguage (
+Country char(3) NOT NULL default '',
+Language char(30) NOT NULL default '',
+Percentage float(3,1) NOT NULL default '0.0',
+PRIMARY KEY (Country, Language),
+INDEX (Percentage)
+) CHARACTER SET utf8 COLLATE utf8_bin;
+ALTER TABLE Country ENGINE=InnoDB;
+ALTER TABLE City ENGINE=InnoDB;
+ALTER TABLE CountryLanguage ENGINE=InnoDB;
+set use_stat_tables='preferably';
+ANALYZE TABLE Country, City, CountryLanguage;
+SELECT UPPER(db_name), UPPER(table_name), cardinality
+FROM mysql.table_stats;
+UPPER(db_name) UPPER(table_name) cardinality
+WORLD COUNTRY 239
+WORLD CITY 4079
+WORLD COUNTRYLANGUAGE 984
+WORLD_INNODB COUNTRY 239
+WORLD_INNODB CITY 4079
+WORLD_INNODB COUNTRYLANGUAGE 984
+SELECT UPPER(db_name), UPPER(table_name),
+column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency
+FROM mysql.column_stats;
+UPPER(db_name) UPPER(table_name) column_name min_value max_value nulls_ratio avg_length avg_frequency
+WORLD COUNTRY Code ABW ZWE 0.0000 3.0000 1.0000
+WORLD COUNTRY Name Afghanistan Zimbabwe 0.0000 10.1088 1.0000
+WORLD COUNTRY SurfaceArea 0.40 17075400.00 0.0000 4.0000 1.0042
+WORLD COUNTRY Population 0 1277558000 0.0000 4.0000 1.0575
+WORLD COUNTRY Capital 1 4074 0.0293 4.0000 1.0000
+WORLD CITY ID 1 4079 0.0000 4.0000 1.0000
+WORLD CITY Name A Coruña (La Coruña) Ürgenc 0.0000 8.6416 1.0195
+WORLD CITY Country ABW ZWE 0.0000 3.0000 17.5819
+WORLD CITY Population 42 10500000 0.0000 4.0000 1.0467
+WORLD COUNTRYLANGUAGE Country ABW ZWE 0.0000 3.0000 4.2232
+WORLD COUNTRYLANGUAGE Language Abhyasi [South]Mande 0.0000 7.1778 2.1532
+WORLD COUNTRYLANGUAGE Percentage 0.0 99.9 0.0000 4.0000 2.7640
+WORLD_INNODB COUNTRY Code ABW ZWE 0.0000 3.0000 1.0000
+WORLD_INNODB COUNTRY Name Afghanistan Zimbabwe 0.0000 10.1088 1.0000
+WORLD_INNODB COUNTRY SurfaceArea 0.40 17075400.00 0.0000 4.0000 1.0042
+WORLD_INNODB COUNTRY Population 0 1277558000 0.0000 4.0000 1.0575
+WORLD_INNODB COUNTRY Capital 1 4074 0.0293 4.0000 1.0000
+WORLD_INNODB CITY ID 1 4079 0.0000 4.0000 1.0000
+WORLD_INNODB CITY Name A Coruña (La Coruña) Ürgenc 0.0000 8.6416 1.0195
+WORLD_INNODB CITY Country ABW ZWE 0.0000 3.0000 17.5819
+WORLD_INNODB CITY Population 42 10500000 0.0000 4.0000 1.0467
+WORLD_INNODB COUNTRYLANGUAGE Country ABW ZWE 0.0000 3.0000 4.2232
+WORLD_INNODB COUNTRYLANGUAGE Language Abhyasi [South]Mande 0.0000 7.1778 2.1532
+WORLD_INNODB COUNTRYLANGUAGE Percentage 0.0 99.9 0.0000 4.0000 2.7640
+SELECT UPPER(db_name), UPPER(table_name),
+index_name, prefix_arity, avg_frequency
+FROM mysql.index_stats;
+UPPER(db_name) UPPER(table_name) index_name prefix_arity avg_frequency
+WORLD COUNTRY PRIMARY 1 1.0000
+WORLD COUNTRY Name 1 1.0000
+WORLD CITY PRIMARY 1 1.0000
+WORLD CITY Population 1 1.0467
+WORLD CITY Country 1 17.5819
+WORLD COUNTRYLANGUAGE PRIMARY 1 4.2232
+WORLD COUNTRYLANGUAGE PRIMARY 2 1.0000
+WORLD COUNTRYLANGUAGE Percentage 1 2.7640
+WORLD_INNODB COUNTRY PRIMARY 1 1.0000
+WORLD_INNODB COUNTRY Name 1 1.0000
+WORLD_INNODB CITY PRIMARY 1 1.0000
+WORLD_INNODB CITY Population 1 1.0467
+WORLD_INNODB CITY Country 1 17.5819
+WORLD_INNODB COUNTRYLANGUAGE PRIMARY 1 4.2232
+WORLD_INNODB COUNTRYLANGUAGE PRIMARY 2 1.0000
+WORLD_INNODB COUNTRYLANGUAGE Percentage 1 2.7640
+use test;
+DROP DATABASE world;
+SELECT UPPER(db_name), UPPER(table_name), cardinality
+FROM mysql.table_stats;
+UPPER(db_name) UPPER(table_name) cardinality
+WORLD_INNODB COUNTRY 239
+WORLD_INNODB CITY 4079
+WORLD_INNODB COUNTRYLANGUAGE 984
+SELECT UPPER(db_name), UPPER(table_name),
+column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency
+FROM mysql.column_stats;
+UPPER(db_name) UPPER(table_name) column_name min_value max_value nulls_ratio avg_length avg_frequency
+WORLD_INNODB COUNTRY Code ABW ZWE 0.0000 3.0000 1.0000
+WORLD_INNODB COUNTRY Name Afghanistan Zimbabwe 0.0000 10.1088 1.0000
+WORLD_INNODB COUNTRY SurfaceArea 0.40 17075400.00 0.0000 4.0000 1.0042
+WORLD_INNODB COUNTRY Population 0 1277558000 0.0000 4.0000 1.0575
+WORLD_INNODB COUNTRY Capital 1 4074 0.0293 4.0000 1.0000
+WORLD_INNODB CITY ID 1 4079 0.0000 4.0000 1.0000
+WORLD_INNODB CITY Name A Coruña (La Coruña) Ürgenc 0.0000 8.6416 1.0195
+WORLD_INNODB CITY Country ABW ZWE 0.0000 3.0000 17.5819
+WORLD_INNODB CITY Population 42 10500000 0.0000 4.0000 1.0467
+WORLD_INNODB COUNTRYLANGUAGE Country ABW ZWE 0.0000 3.0000 4.2232
+WORLD_INNODB COUNTRYLANGUAGE Language Abhyasi [South]Mande 0.0000 7.1778 2.1532
+WORLD_INNODB COUNTRYLANGUAGE Percentage 0.0 99.9 0.0000 4.0000 2.7640
+SELECT UPPER(db_name), UPPER(table_name),
+index_name, prefix_arity, avg_frequency
+FROM mysql.index_stats;
+UPPER(db_name) UPPER(table_name) index_name prefix_arity avg_frequency
+WORLD_INNODB COUNTRY PRIMARY 1 1.0000
+WORLD_INNODB COUNTRY Name 1 1.0000
+WORLD_INNODB CITY PRIMARY 1 1.0000
+WORLD_INNODB CITY Population 1 1.0467
+WORLD_INNODB CITY Country 1 17.5819
+WORLD_INNODB COUNTRYLANGUAGE PRIMARY 1 4.2232
+WORLD_INNODB COUNTRYLANGUAGE PRIMARY 2 1.0000
+WORLD_INNODB COUNTRYLANGUAGE Percentage 1 2.7640
+DROP DATABASE world_innodb;
+SELECT UPPER(db_name), UPPER(table_name), cardinality
+FROM mysql.table_stats;
+UPPER(db_name) UPPER(table_name) cardinality
+SELECT UPPER(db_name), UPPER(table_name),
+column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency
+FROM mysql.column_stats;
+UPPER(db_name) UPPER(table_name) column_name min_value max_value nulls_ratio avg_length avg_frequency
+SELECT UPPER(db_name), UPPER(table_name),
+index_name, prefix_arity, avg_frequency
+FROM mysql.index_stats;
+UPPER(db_name) UPPER(table_name) index_name prefix_arity avg_frequency
+DELETE FROM mysql.table_stats;
+DELETE FROM mysql.column_stats;
+DELETE FROM mysql.index_stats;
+set use_stat_tables=@save_use_stat_tables;
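Note: the expectations above exercise MariaDB's engine-independent persistent statistics. With use_stat_tables='preferably', ANALYZE TABLE fills mysql.table_stats, mysql.column_stats and mysql.index_stats (identically for the MyISAM and InnoDB copies of the schema), and DROP DATABASE removes the matching rows. A minimal sketch of the same workflow on a hypothetical table t:
set session use_stat_tables='preferably';
create table t (a int primary key, b varchar(10), index (b));
insert into t values (1,'x'),(2,'y'),(3,'y');
analyze table t;
-- avg_frequency = rows / distinct values; b gives 3/2 = 1.5000 here,
-- while values near 1.0 (as for Country.Code above) mean "almost unique".
select column_name, nulls_ratio, avg_frequency from mysql.column_stats
where db_name=database() and table_name='t';
select index_name, prefix_arity, avg_frequency from mysql.index_stats
where db_name=database() and table_name='t';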
diff --git a/mysql-test/r/status_user.result b/mysql-test/r/status_user.result
index f43e217b8a5..040b2d85a51 100644
--- a/mysql-test/r/status_user.result
+++ b/mysql-test/r/status_user.result
@@ -107,7 +107,7 @@ Handler_mrr_key_refills 0
Handler_mrr_rowid_refills 0
Handler_prepare 18
Handler_read_first 0
-Handler_read_key 3
+Handler_read_key 9
Handler_read_last 0
Handler_read_next 0
Handler_read_prev 0
@@ -123,7 +123,7 @@ Handler_update 5
Handler_write 7
select variable_value - @global_read_key as "handler_read_key" from information_schema.global_status where variable_name="handler_read_key";
handler_read_key
-3
+9
set @@global.userstat=0;
select * from information_schema.index_statistics;
TABLE_SCHEMA TABLE_NAME INDEX_NAME ROWS_READ
diff --git a/mysql-test/r/str_to_datetime_457.result b/mysql-test/r/str_to_datetime_457.result
new file mode 100644
index 00000000000..4fd0d00691c
--- /dev/null
+++ b/mysql-test/r/str_to_datetime_457.result
@@ -0,0 +1,51 @@
+select cast('01:02:03 ' as time), cast('01:02:03 ' as time);
+cast('01:02:03 ' as time) cast('01:02:03 ' as time)
+01:02:03 00:00:00
+select cast('2002-011-012' as date), cast('2002.11.12' as date), cast('2002.011.012' as date);
+cast('2002-011-012' as date) cast('2002.11.12' as date) cast('2002.011.012' as date)
+2002-11-12 2002-11-12 2002-11-12
+select cast('2012103123595912' as datetime(6)), cast('20121031235959123' as datetime(6));
+cast('2012103123595912' as datetime(6)) cast('20121031235959123' as datetime(6))
+2012-10-31 23:59:59.000000 2012-10-31 23:59:59.000000
+Warnings:
+Warning 1292 Truncated incorrect datetime value: '2012103123595912'
+Warning 1292 Truncated incorrect datetime value: '20121031235959123'
+select cast(0 as date), cast('0000-00-00' as date), cast('0' as date);
+cast(0 as date) cast('0000-00-00' as date) cast('0' as date)
+0000-00-00 0000-00-00 NULL
+Warnings:
+Warning 1292 Incorrect datetime value: '0'
+select extract(hour from '100000:02:03'), extract(hour from '100000:02:03 ');
+extract(hour from '100000:02:03') extract(hour from '100000:02:03 ')
+NULL NULL
+Warnings:
+Warning 1292 Truncated incorrect time value: '100000:02:03'
+Warning 1292 Truncated incorrect time value: '100000:02:03 '
+#
+# backward compatibility craziness
+#
+select cast('12:00:00.12.34.56' as time);
+cast('12:00:00.12.34.56' as time)
+12:00:00
+Warnings:
+Warning 1292 Truncated incorrect time value: '12:00:00.12.34.56'
+select cast('12:00:00 12.34.56' as time);
+cast('12:00:00 12.34.56' as time)
+12:34:56
+select cast('12:00:00-12.34.56' as time);
+cast('12:00:00-12.34.56' as time)
+12:00:00
+Warnings:
+Warning 1292 Truncated incorrect time value: '12:00:00-12.34.56'
+select cast('12:00:00.12.34.56' as datetime);
+cast('12:00:00.12.34.56' as datetime)
+2012-00-00 12:34:56
+select cast('12:00:00-12.34.56' as datetime);
+cast('12:00:00-12.34.56' as datetime)
+2012-00-00 12:34:56
+select cast('12:00:00 12.34.56' as datetime);
+cast('12:00:00 12.34.56' as datetime)
+2012-00-00 12:34:56
+select cast('12:00:00.123456' as time);
+cast('12:00:00.123456' as time)
+12:00:00
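Note: these new str_to_datetime_457 expectations document the deliberately lenient parser. Cast to DATETIME, the leading 'hh' is reinterpreted as a two-digit year, so all three '12:00:00...' variants become 2012-00-00 12:34:56; cast to TIME, only the space-separated variant is first parsed as a full datetime and its time part kept, while the '.'- and '-'-separated forms truncate after hh:mm:ss with a warning. In sketch form:
-- The leading '12' becomes year 2012 when the target is DATETIME,
-- and '12.34.56' supplies the time portion:
select cast('12:00:00 12.34.56' as datetime);  -- 2012-00-00 12:34:56
-- The same string as TIME keeps the time part of that datetime:
select cast('12:00:00 12.34.56' as time);      -- 12:34:56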
diff --git a/mysql-test/r/subselect_innodb.result b/mysql-test/r/subselect_innodb.result
index a6967527a2d..8932acf8ffd 100644
--- a/mysql-test/r/subselect_innodb.result
+++ b/mysql-test/r/subselect_innodb.result
@@ -248,6 +248,47 @@ NULL
drop procedure p1;
drop tables t1,t2,t3;
#
+# Bug #58756
+# Crash in heap_rrnd on query with HAVING ... IN (subquery) + LIMIT
+#
+CREATE TABLE t1 (
+col_time_key time DEFAULT NULL,
+col_datetime_key datetime DEFAULT NULL,
+col_varchar_nokey varchar(1) DEFAULT NULL,
+KEY col_time_key (col_time_key),
+KEY col_datetime_key (col_datetime_key)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO t1 VALUES ('17:53:30','2005-11-10 12:40:29','h');
+INSERT INTO t1 VALUES ('11:35:49','2009-04-25 00:00:00','b');
+INSERT INTO t1 VALUES (NULL,'2002-11-27 00:00:00','s');
+INSERT INTO t1 VALUES ('06:01:40','2004-01-26 20:32:32','e');
+INSERT INTO t1 VALUES ('05:45:11','2007-10-26 11:41:40','j');
+INSERT INTO t1 VALUES ('00:00:00','2005-10-07 00:00:00','e');
+INSERT INTO t1 VALUES ('00:00:00','2000-07-15 05:00:34','f');
+INSERT INTO t1 VALUES ('06:11:01','2000-04-03 16:33:32','v');
+INSERT INTO t1 VALUES ('13:02:46',NULL,'x');
+INSERT INTO t1 VALUES ('21:44:25','2001-04-25 01:26:12','m');
+INSERT INTO t1 VALUES ('22:43:58','2000-12-27 00:00:00','c');
+CREATE TABLE t2 (
+col_time_key time DEFAULT NULL,
+col_datetime_key datetime DEFAULT NULL,
+col_varchar_nokey varchar(1) DEFAULT NULL,
+KEY col_time_key (col_time_key),
+KEY col_datetime_key (col_datetime_key)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO t2 VALUES ('11:28:45','2004-10-11 18:13:16','w');
+SELECT col_time_key, col_datetime_key
+FROM
+( SELECT * FROM t1 ) AS table1
+HAVING ( 'r' , 'e' ) IN
+( SELECT col_varchar_nokey , col_varchar_nokey FROM t2 )
+ORDER BY col_datetime_key
+LIMIT 10;
+col_time_key col_datetime_key
+DROP TABLE t1;
+DROP TABLE t2;
+# End of Bug #58756
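Note: the Bug #58756 block above is a pure regression guard: ('r','e') can never match t2's single ('w','w') row, so the HAVING predicate rejects every row of the derived table and the correct result is the empty set; before the fix, sorting that derived table with ORDER BY ... LIMIT crashed in heap_rrnd. The trigger shape, annotated:
SELECT col_time_key, col_datetime_key
FROM ( SELECT * FROM t1 ) AS table1        -- derived table materialized into a HEAP temp table
HAVING ( 'r', 'e' ) IN
  ( SELECT col_varchar_nokey, col_varchar_nokey FROM t2 )  -- never true for ('w','w')
ORDER BY col_datetime_key LIMIT 10;        -- sort + limit on the temp table hit the crash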
+#
# Bug#60085 crash in Item::save_in_field() with time data type
#
CREATE TABLE t1(a date, b int, unique(b), unique(a), key(b)) engine=innodb;
diff --git a/mysql-test/r/subselect_mat_cost.result b/mysql-test/r/subselect_mat_cost.result
index 081196a227b..51ac98ca1a6 100644
--- a/mysql-test/r/subselect_mat_cost.result
+++ b/mysql-test/r/subselect_mat_cost.result
@@ -399,10 +399,10 @@ WHERE Code = Country GROUP BY Code)
order by Country;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY CountryLanguage index NULL PRIMARY 33 NULL 984 Using where; Using index
-3 MATERIALIZED CountryLanguage index PRIMARY PRIMARY 33 NULL 984 Using index; Using temporary
-3 MATERIALIZED Country eq_ref PRIMARY PRIMARY 3 world.CountryLanguage.Country 1 Using index
-2 MATERIALIZED CountryLanguage index PRIMARY PRIMARY 33 NULL 984 Using index; Using temporary
-2 MATERIALIZED Country eq_ref PRIMARY PRIMARY 3 world.CountryLanguage.Country 1 Using index
+3 MATERIALIZED Country index PRIMARY PRIMARY 3 NULL 239 Using index
+3 MATERIALIZED CountryLanguage ref PRIMARY PRIMARY 3 world.Country.Code 4 Using index
+2 MATERIALIZED Country index PRIMARY PRIMARY 3 NULL 239 Using index
+2 MATERIALIZED CountryLanguage ref PRIMARY PRIMARY 3 world.Country.Code 4 Using index
select count(*)
from CountryLanguage
where
diff --git a/mysql-test/r/symlink.result b/mysql-test/r/symlink.result
index 20736aec47f..279af1e44d2 100644
--- a/mysql-test/r/symlink.result
+++ b/mysql-test/r/symlink.result
@@ -120,7 +120,7 @@ CREATE TABLE t2(a INT)
DATA DIRECTORY='TEST_DIR/tmp'
INDEX DIRECTORY='TEST_DIR/tmp';
RENAME TABLE t2 TO t1;
-ERROR HY000: Can't create/write to file 'TEST_DIR/tmp/t1.MYI' (Errcode: 17)
+ERROR HY000: Can't create/write to file 'TEST_DIR/tmp/t1.MYI' (Errcode: 17 "File exists")
DROP TABLE t2;
create temporary table t1 (a int) engine=myisam data directory="MYSQLTEST_VARDIR/log" select 9 a;
show create table t1;
@@ -167,7 +167,7 @@ INDEX DIRECTORY='MYSQLD_DATADIR';
DROP TABLE IF EXISTS t1;
CREATE TABLE t1(a INT)
INDEX DIRECTORY='TEST_DIR/master-data_var';
-ERROR HY000: Can't create/write to file 'TEST_DIR/master-data_var/t1.MYI' (Errcode: 2)
+ERROR HY000: Can't create/write to file 'TEST_DIR/master-data_var/t1.MYI' (Errcode: 2 "No such file or directory")
SET @OLD_SQL_MODE=@@SQL_MODE, @@SQL_MODE='NO_DIR_IN_CREATE';
CREATE TABLE t1(a INT) DATA DIRECTORY='MYSQLTEST_VARDIR/tmp' INDEX DIRECTORY='MYSQLTEST_VARDIR/tmp';
Warnings:
diff --git a/mysql-test/r/system_mysql_db.result b/mysql-test/r/system_mysql_db.result
index d5409136ed4..4bd5c45d944 100644
--- a/mysql-test/r/system_mysql_db.result
+++ b/mysql-test/r/system_mysql_db.result
@@ -1,5 +1,6 @@
show tables;
Tables_in_db
+column_stats
columns_priv
db
event
@@ -10,6 +11,7 @@ help_keyword
help_relation
help_topic
host
+index_stats
ndb_binlog_index
plugin
proc
@@ -17,6 +19,7 @@ procs_priv
proxies_priv
servers
slow_log
+table_stats
tables_priv
time_zone
time_zone_leap_second
@@ -242,7 +245,7 @@ event CREATE TABLE `event` (
show create table general_log;
Table Create Table
general_log CREATE TABLE `general_log` (
- `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
`thread_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
@@ -252,7 +255,7 @@ general_log CREATE TABLE `general_log` (
show create table slow_log;
Table Create Table
slow_log CREATE TABLE `slow_log` (
- `start_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `start_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
`query_time` time(6) NOT NULL,
`lock_time` time(6) NOT NULL,
@@ -264,5 +267,36 @@ slow_log CREATE TABLE `slow_log` (
`server_id` int(10) unsigned NOT NULL,
`sql_text` mediumtext NOT NULL
) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log'
+show create table table_stats;
+Table Create Table
+table_stats CREATE TABLE `table_stats` (
+ `db_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `cardinality` bigint(21) unsigned DEFAULT NULL,
+ PRIMARY KEY (`db_name`,`table_name`)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Tables'
+show create table column_stats;
+Table Create Table
+column_stats CREATE TABLE `column_stats` (
+ `db_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `column_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `min_value` varchar(255) COLLATE utf8_bin DEFAULT NULL,
+ `max_value` varchar(255) COLLATE utf8_bin DEFAULT NULL,
+ `nulls_ratio` decimal(12,4) DEFAULT NULL,
+ `avg_length` decimal(12,4) DEFAULT NULL,
+ `avg_frequency` decimal(12,4) DEFAULT NULL,
+ PRIMARY KEY (`db_name`,`table_name`,`column_name`)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Columns'
+show create table index_stats;
+Table Create Table
+index_stats CREATE TABLE `index_stats` (
+ `db_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `index_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `prefix_arity` int(11) unsigned NOT NULL,
+ `avg_frequency` decimal(12,4) DEFAULT NULL,
+ PRIMARY KEY (`db_name`,`table_name`,`index_name`,`prefix_arity`)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Indexes'
show tables;
Tables_in_test
diff --git a/mysql-test/r/system_mysql_db_fix40123.result b/mysql-test/r/system_mysql_db_fix40123.result
index d5409136ed4..51def0fae6c 100644
--- a/mysql-test/r/system_mysql_db_fix40123.result
+++ b/mysql-test/r/system_mysql_db_fix40123.result
@@ -1,5 +1,6 @@
show tables;
Tables_in_db
+column_stats
columns_priv
db
event
@@ -10,6 +11,7 @@ help_keyword
help_relation
help_topic
host
+index_stats
ndb_binlog_index
plugin
proc
@@ -17,6 +19,7 @@ procs_priv
proxies_priv
servers
slow_log
+table_stats
tables_priv
time_zone
time_zone_leap_second
@@ -242,7 +245,7 @@ event CREATE TABLE `event` (
show create table general_log;
Table Create Table
general_log CREATE TABLE `general_log` (
- `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
`thread_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
@@ -252,7 +255,7 @@ general_log CREATE TABLE `general_log` (
show create table slow_log;
Table Create Table
slow_log CREATE TABLE `slow_log` (
- `start_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `start_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
`query_time` time(6) NOT NULL,
`lock_time` time(6) NOT NULL,
@@ -264,5 +267,36 @@ slow_log CREATE TABLE `slow_log` (
`server_id` int(10) unsigned NOT NULL,
`sql_text` mediumtext NOT NULL
) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log'
+show create table table_stats;
+Table Create Table
+table_stats CREATE TABLE `table_stats` (
+ `db_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `cardinality` bigint(21) unsigned DEFAULT NULL,
+ PRIMARY KEY (`db_name`,`table_name`)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Tables'
+show create table column_stats;
+Table Create Table
+column_stats CREATE TABLE `column_stats` (
+ `db_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `column_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `min_value` varchar(255) COLLATE utf8_bin DEFAULT NULL,
+ `max_value` varchar(255) COLLATE utf8_bin DEFAULT NULL,
+ `nulls_ratio` double DEFAULT NULL,
+ `avg_length` double DEFAULT NULL,
+ `avg_frequency` double DEFAULT NULL,
+ PRIMARY KEY (`db_name`,`table_name`,`column_name`)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Columns'
+show create table index_stats;
+Table Create Table
+index_stats CREATE TABLE `index_stats` (
+ `db_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `index_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `prefix_arity` int(11) unsigned NOT NULL,
+ `avg_frequency` double DEFAULT NULL,
+ PRIMARY KEY (`db_name`,`table_name`,`index_name`,`prefix_arity`)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Indexes'
show tables;
Tables_in_test
diff --git a/mysql-test/r/system_mysql_db_fix50030.result b/mysql-test/r/system_mysql_db_fix50030.result
index d5409136ed4..51def0fae6c 100644
--- a/mysql-test/r/system_mysql_db_fix50030.result
+++ b/mysql-test/r/system_mysql_db_fix50030.result
@@ -1,5 +1,6 @@
show tables;
Tables_in_db
+column_stats
columns_priv
db
event
@@ -10,6 +11,7 @@ help_keyword
help_relation
help_topic
host
+index_stats
ndb_binlog_index
plugin
proc
@@ -17,6 +19,7 @@ procs_priv
proxies_priv
servers
slow_log
+table_stats
tables_priv
time_zone
time_zone_leap_second
@@ -242,7 +245,7 @@ event CREATE TABLE `event` (
show create table general_log;
Table Create Table
general_log CREATE TABLE `general_log` (
- `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
`thread_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
@@ -252,7 +255,7 @@ general_log CREATE TABLE `general_log` (
show create table slow_log;
Table Create Table
slow_log CREATE TABLE `slow_log` (
- `start_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `start_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
`query_time` time(6) NOT NULL,
`lock_time` time(6) NOT NULL,
@@ -264,5 +267,36 @@ slow_log CREATE TABLE `slow_log` (
`server_id` int(10) unsigned NOT NULL,
`sql_text` mediumtext NOT NULL
) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log'
+show create table table_stats;
+Table Create Table
+table_stats CREATE TABLE `table_stats` (
+ `db_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `cardinality` bigint(21) unsigned DEFAULT NULL,
+ PRIMARY KEY (`db_name`,`table_name`)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Tables'
+show create table column_stats;
+Table Create Table
+column_stats CREATE TABLE `column_stats` (
+ `db_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `column_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `min_value` varchar(255) COLLATE utf8_bin DEFAULT NULL,
+ `max_value` varchar(255) COLLATE utf8_bin DEFAULT NULL,
+ `nulls_ratio` double DEFAULT NULL,
+ `avg_length` double DEFAULT NULL,
+ `avg_frequency` double DEFAULT NULL,
+ PRIMARY KEY (`db_name`,`table_name`,`column_name`)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Columns'
+show create table index_stats;
+Table Create Table
+index_stats CREATE TABLE `index_stats` (
+ `db_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `index_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `prefix_arity` int(11) unsigned NOT NULL,
+ `avg_frequency` double DEFAULT NULL,
+ PRIMARY KEY (`db_name`,`table_name`,`index_name`,`prefix_arity`)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Indexes'
show tables;
Tables_in_test
diff --git a/mysql-test/r/system_mysql_db_fix50117.result b/mysql-test/r/system_mysql_db_fix50117.result
index d5409136ed4..51def0fae6c 100644
--- a/mysql-test/r/system_mysql_db_fix50117.result
+++ b/mysql-test/r/system_mysql_db_fix50117.result
@@ -1,5 +1,6 @@
show tables;
Tables_in_db
+column_stats
columns_priv
db
event
@@ -10,6 +11,7 @@ help_keyword
help_relation
help_topic
host
+index_stats
ndb_binlog_index
plugin
proc
@@ -17,6 +19,7 @@ procs_priv
proxies_priv
servers
slow_log
+table_stats
tables_priv
time_zone
time_zone_leap_second
@@ -242,7 +245,7 @@ event CREATE TABLE `event` (
show create table general_log;
Table Create Table
general_log CREATE TABLE `general_log` (
- `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
`thread_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
@@ -252,7 +255,7 @@ general_log CREATE TABLE `general_log` (
show create table slow_log;
Table Create Table
slow_log CREATE TABLE `slow_log` (
- `start_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ `start_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
`query_time` time(6) NOT NULL,
`lock_time` time(6) NOT NULL,
@@ -264,5 +267,36 @@ slow_log CREATE TABLE `slow_log` (
`server_id` int(10) unsigned NOT NULL,
`sql_text` mediumtext NOT NULL
) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log'
+show create table table_stats;
+Table Create Table
+table_stats CREATE TABLE `table_stats` (
+ `db_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `cardinality` bigint(21) unsigned DEFAULT NULL,
+ PRIMARY KEY (`db_name`,`table_name`)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Tables'
+show create table column_stats;
+Table Create Table
+column_stats CREATE TABLE `column_stats` (
+ `db_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `column_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `min_value` varchar(255) COLLATE utf8_bin DEFAULT NULL,
+ `max_value` varchar(255) COLLATE utf8_bin DEFAULT NULL,
+ `nulls_ratio` double DEFAULT NULL,
+ `avg_length` double DEFAULT NULL,
+ `avg_frequency` double DEFAULT NULL,
+ PRIMARY KEY (`db_name`,`table_name`,`column_name`)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Columns'
+show create table index_stats;
+Table Create Table
+index_stats CREATE TABLE `index_stats` (
+ `db_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `index_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `prefix_arity` int(11) unsigned NOT NULL,
+ `avg_frequency` double DEFAULT NULL,
+ PRIMARY KEY (`db_name`,`table_name`,`index_name`,`prefix_arity`)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Indexes'
show tables;
Tables_in_test
diff --git a/mysql-test/r/testdb_only.require b/mysql-test/r/testdb_only.require
deleted file mode 100644
index e717418fdb6..00000000000
--- a/mysql-test/r/testdb_only.require
+++ /dev/null
@@ -1,2 +0,0 @@
-Variable_name Value
-use extern server NO
diff --git a/mysql-test/r/trigger.result b/mysql-test/r/trigger.result
index 6047d00c858..3310209df58 100644
--- a/mysql-test/r/trigger.result
+++ b/mysql-test/r/trigger.result
@@ -969,7 +969,7 @@ trigger_schema trigger_name event_object_schema event_object_table action_statem
test t1_bi test t1 set @a:=new.id
test t1_ai test t1 set @b:=new.id
rename table t1 to t2;
-ERROR HY000: Can't create/write to file './test/t1_ai.TRN~' (Errcode: 13)
+ERROR HY000: Can't create/write to file './test/t1_ai.TRN~' (Errcode: 13 "Permission denied")
insert into t1 values (102);
select @a, @b;
@a @b
diff --git a/mysql-test/r/type_timestamp.result b/mysql-test/r/type_timestamp.result
index 52c7f05839e..e7add0d80a7 100644
--- a/mysql-test/r/type_timestamp.result
+++ b/mysql-test/r/type_timestamp.result
@@ -148,15 +148,15 @@ ix+0
20030101000000
drop table t1;
create table t1 (t1 timestamp, t2 timestamp default now());
-ERROR HY000: Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause
+drop table t1;
create table t1 (t1 timestamp, t2 timestamp on update now());
-ERROR HY000: Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause
+drop table t1;
create table t1 (t1 timestamp, t2 timestamp default now() on update now());
-ERROR HY000: Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause
+drop table t1;
create table t1 (t1 timestamp default now(), t2 timestamp on update now());
-ERROR HY000: Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause
+drop table t1;
create table t1 (t1 timestamp on update now(), t2 timestamp default now() on update now());
-ERROR HY000: Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause
+drop table t1;
create table t1 (t1 timestamp default '2003-01-01 00:00:00', t2 datetime, t3 timestamp);
SET TIMESTAMP=1000000000;
insert into t1 values ();
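Note: the five rewritten cases above record a behavior change rather than a bug fix: the old single-automatic-TIMESTAMP restriction (ER_TOO_MUCH_AUTO_TIMESTAMPS) is lifted, so each CREATE TABLE now succeeds and the test merely drops the table again. What is newly legal, as a sketch with hypothetical column names:
-- Two columns may now both carry automatic timestamps:
create table t1 (
  created timestamp default current_timestamp,
  updated timestamp default current_timestamp on update current_timestamp
);
drop table t1;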
diff --git a/mysql-test/r/type_timestamp_hires.result b/mysql-test/r/type_timestamp_hires.result
index 3f1e05f4870..cc2cb6a403d 100644
--- a/mysql-test/r/type_timestamp_hires.result
+++ b/mysql-test/r/type_timestamp_hires.result
@@ -63,15 +63,15 @@ a
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `a` timestamp(4) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
+ `a` timestamp(4) NOT NULL DEFAULT CURRENT_TIMESTAMP(4) ON UPDATE CURRENT_TIMESTAMP(4)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
show columns from t1;
Field Type Null Key Default Extra
-a timestamp(4) NO CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP
+a timestamp(4) NO CURRENT_TIMESTAMP(4) on update CURRENT_TIMESTAMP
select table_name, column_name, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, datetime_precision, character_set_name, collation_name, column_type, column_key, extra from information_schema.columns where table_name='t1';
table_name t1
column_name a
-column_default CURRENT_TIMESTAMP
+column_default CURRENT_TIMESTAMP(4)
is_nullable NO
data_type timestamp
character_maximum_length NULL
@@ -113,7 +113,7 @@ t2 CREATE TABLE `t2` (
show create table t3;
Table Create Table
t3 CREATE TABLE `t3` (
- `a` timestamp(4) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
+ `a` timestamp(4) NOT NULL DEFAULT CURRENT_TIMESTAMP(4) ON UPDATE CURRENT_TIMESTAMP(4)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
drop table t2, t3;
insert t1 values ('2010-12-13 14:15:16.222222');
@@ -278,3 +278,23 @@ select * from t1;
a
2011-01-01 01:01:01.12345
drop table t1;
+create table t1 (a timestamp(5) default current_timestamp);
+drop table t1;
+create table t1 (a timestamp(5) default current_timestamp());
+drop table t1;
+create table t1 (a timestamp(5) default current_timestamp(2));
+ERROR 42000: Invalid default value for 'a'
+create table t1 (a timestamp(5) default current_timestamp(5));
+drop table t1;
+create table t1 (a timestamp(5) default current_timestamp(6));
+drop table t1;
+create table t1 (a timestamp(5) on update current_timestamp);
+drop table t1;
+create table t1 (a timestamp(5) on update current_timestamp());
+drop table t1;
+create table t1 (a timestamp(5) on update current_timestamp(3));
+ERROR HY000: Invalid ON UPDATE clause for 'a' column
+create table t1 (a timestamp(5) on update current_timestamp(5));
+drop table t1;
+create table t1 (a timestamp(5) on update current_timestamp(6));
+drop table t1;
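Note: the added cases pin down the precision rule for automatic timestamps: an explicit CURRENT_TIMESTAMP(N) on a timestamp(M) column is rejected when N < M and accepted when N >= M or when no precision is spelled out. Condensed:
create table t1 (a timestamp(5) default current_timestamp(2));  -- rejected: 2 < 5
create table t1 (a timestamp(5) default current_timestamp(5));  -- ok
create table t1 (a timestamp(5) default current_timestamp(6));  -- ok: 6 >= 5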
diff --git a/mysql-test/r/user_var.result b/mysql-test/r/user_var.result
index 9c4fd02fcdd..6cbbe934bde 100644
--- a/mysql-test/r/user_var.result
+++ b/mysql-test/r/user_var.result
@@ -530,4 +530,9 @@ SELECT @a;
@a
1
DROP TABLE t1;
+#
+# Check that used memory grows when we set a user variable
+#
+set @var= repeat('a',20000);
+1
End of 5.5 tests
diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result
index e055a333faf..6ec9d77230b 100644
--- a/mysql-test/r/variables.result
+++ b/mysql-test/r/variables.result
@@ -211,10 +211,10 @@ VARIABLE_NAME VARIABLE_VALUE
DEFAULT_STORAGE_ENGINE MEMORY
show global variables like 'default_storage_engine';
Variable_name Value
-default_storage_engine MRG_MYISAM
+default_storage_engine MRG_MyISAM
select * from information_schema.global_variables where variable_name like 'default_storage_engine';
VARIABLE_NAME VARIABLE_VALUE
-DEFAULT_STORAGE_ENGINE MRG_MYISAM
+DEFAULT_STORAGE_ENGINE MRG_MyISAM
set GLOBAL myisam_max_sort_file_size=2000000;
Warnings:
Warning 1292 Truncated incorrect myisam_max_sort_file_size value: '2000000'
diff --git a/mysql-test/r/windows.require b/mysql-test/r/windows.require
deleted file mode 100644
index 09aae1ed1d0..00000000000
--- a/mysql-test/r/windows.require
+++ /dev/null
@@ -1,2 +0,0 @@
-TRUE
-1
diff --git a/mysql-test/r/xa_binlog.result b/mysql-test/r/xa_binlog.result
index 3ce64953902..395f0dc62a4 100644
--- a/mysql-test/r/xa_binlog.result
+++ b/mysql-test/r/xa_binlog.result
@@ -18,7 +18,7 @@ a
1
2
3
-SHOW BINLOG EVENTS LIMIT 1,9;
+SHOW BINLOG EVENTS LIMIT 2,9;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query 1 # BEGIN
master-bin.000001 # Query 1 # use `test`; INSERT INTO t1 VALUES (1)
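Note: the offset change from LIMIT 1,9 to LIMIT 2,9 follows directly from the new checkpoint event: every binlog now opens with Format_desc followed by Binlog_checkpoint, so the test skips two header events to keep comparing the same nine transaction events:
SHOW BINLOG EVENTS LIMIT 2,9;  -- skip Format_desc and Binlog_checkpoint, keep the 9 events under test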
diff --git a/mysql-test/std_data/onerow.xml b/mysql-test/std_data/onerow.xml
new file mode 100644
index 00000000000..094dd813b2d
--- /dev/null
+++ b/mysql-test/std_data/onerow.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0"?>
+<mysqldump xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+<database name="test">
+ <table_structure name="onerow">
+ <field Field="a" Type="int(11)" Null="YES" Key="" Extra="" />
+ </table_structure>
+ <table_data name="onerow">
+ <row>
+ <field name="a">1</field>
+ </row>
+ </table_data>
+</database>
+</mysqldump>
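Note: std_data/onerow.xml uses the mysqldump --xml layout (a <table_structure> element plus a single <row>), so it is presumably input for a LOAD XML test elsewhere in the suite; loading it back would look roughly like this, assuming a matching one-column table:
create table onerow (a int);
load xml local infile 'std_data/onerow.xml'
  into table onerow rows identified by '<row>';
select * from onerow;  -- 1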
diff --git a/mysql-test/suite/archive/archive.result b/mysql-test/suite/archive/archive.result
index b4c4aab621d..98608a95f37 100644
--- a/mysql-test/suite/archive/archive.result
+++ b/mysql-test/suite/archive/archive.result
@@ -12754,7 +12754,7 @@ DROP TABLE t1;
CREATE TABLE t1(a INT) ENGINE=ARCHIVE;
FLUSH TABLE t1;
SELECT * FROM t1;
-ERROR HY000: Can't find file: 't1' (errno: 2)
+ERROR HY000: Can't find file: 't1' (errno: 2 "No such file or directory")
DROP TABLE t1;
ERROR 42S02: Unknown table 't1'
#
diff --git a/mysql-test/suite/binlog/r/binlog_checkpoint.result b/mysql-test/suite/binlog/r/binlog_checkpoint.result
new file mode 100644
index 00000000000..7bfa66e4770
--- /dev/null
+++ b/mysql-test/suite/binlog/r/binlog_checkpoint.result
@@ -0,0 +1,94 @@
+SET @old_max_binlog_size= @@global.max_binlog_size;
+SET GLOBAL max_binlog_size= 4096;
+SET @old_innodb_flush_log_at_trx_commit= @@global.innodb_flush_log_at_trx_commit;
+SET GLOBAL innodb_flush_log_at_trx_commit= 1;
+RESET MASTER;
+CREATE TABLE t1 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb;
+CREATE TABLE t2 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Myisam;
+*** Test that RESET MASTER waits for pending commit checkpoints to complete.
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con1_ready WAIT_FOR con1_go";
+INSERT INTO t1 VALUES (1, REPEAT("x", 4100));
+SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
+INSERT INTO t2 VALUES (1, REPEAT("x", 4100));
+INSERT INTO t2 VALUES (2, REPEAT("x", 4100));
+show binary logs;
+Log_name File_size
+master-bin.000001 #
+master-bin.000002 #
+master-bin.000003 #
+master-bin.000004 #
+show binlog events in 'master-bin.00000<binlog_start>' from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.00000<binlog_start> # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+master-bin.00000<binlog_start> # Binlog_checkpoint # # master-bin.000001
+SET DEBUG_SYNC= "execute_command_after_close_tables SIGNAL reset_master_done";
+RESET MASTER;
+This will time out, as RESET MASTER is blocked
+SET DEBUG_SYNC= "now WAIT_FOR reset_master_done TIMEOUT 1";
+Warnings:
+Warning 1639 debug sync point wait timed out
+SET DEBUG_SYNC= "now SIGNAL con1_go";
+show binary logs;
+Log_name File_size
+master-bin.000001 #
+show binlog events in 'master-bin.000001' from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+master-bin.000001 # Binlog_checkpoint # # master-bin.000001
+*** Test that binlog N is active, and commit checkpoint for (N-1) is
+*** done while there is still a pending commit checkpoint for (N-2).
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con1_ready WAIT_FOR con1_continue";
+INSERT INTO t1 VALUES (20, REPEAT("x", 4100));
+SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con2_ready WAIT_FOR con2_continue";
+INSERT INTO t1 VALUES (21, REPEAT("x", 4100));
+SET DEBUG_SYNC= "now WAIT_FOR con2_ready";
+show binary logs;
+Log_name File_size
+master-bin.000001 #
+master-bin.000002 #
+master-bin.000003 #
+show binlog events in 'master-bin.000001' from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+master-bin.000001 # Binlog_checkpoint # # master-bin.000001
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Table_map # # table_id: # (test.t1)
+master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.000001 # Xid # # COMMIT /* XID */
+master-bin.000001 # Rotate # # master-bin.000002;pos=<binlog_start>
+show binlog events in 'master-bin.000002' from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000002 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+master-bin.000002 # Binlog_checkpoint # # master-bin.000001
+master-bin.000002 # Query # # BEGIN
+master-bin.000002 # Table_map # # table_id: # (test.t1)
+master-bin.000002 # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.000002 # Xid # # COMMIT /* XID */
+master-bin.000002 # Rotate # # master-bin.000003;pos=<binlog_start>
+show binlog events in 'master-bin.000003' from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000003 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+master-bin.000003 # Binlog_checkpoint # # master-bin.000001
+SET DEBUG_SYNC= "RESET";
+SET @old_dbug= @@global.DEBUG_DBUG;
+SET GLOBAL debug_dbug="+d,binlog_background_checkpoint_processed";
+SET DEBUG_SYNC= "now SIGNAL con2_continue";
+con1 is still pending, no new binlog checkpoint should have been logged.
+SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed";
+SET GLOBAL debug_dbug= @old_dbug;
+SET DEBUG_SYNC= "RESET";
+show binlog events in 'master-bin.000003' from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000003 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+master-bin.000003 # Binlog_checkpoint # # master-bin.000001
+SET DEBUG_SYNC= "now SIGNAL con1_continue";
+No commit checkpoints are pending, a new binlog checkpoint should have been logged.
+show binlog events in 'master-bin.000003' from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000003 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+master-bin.000003 # Binlog_checkpoint # # master-bin.000001
+master-bin.000003 # Binlog_checkpoint # # master-bin.000003
+DROP TABLE t1, t2;
+SET GLOBAL max_binlog_size= @old_max_binlog_size;
+SET GLOBAL innodb_flush_log_at_trx_commit= @old_innodb_flush_log_at_trx_commit;
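Note: this new test documents the binlog checkpoint protocol: each Binlog_checkpoint event names the oldest binlog file that may still hold transactions not yet durable in the storage engine, RESET MASTER must wait for pending checkpoints before purging, and a checkpoint for binlog N-1 is not logged while one for N-2 is still outstanding. The DEBUG_SYNC pattern used above to hold a commit open between "committed to engine" and "checkpointed", as a sketch (sync-point and signal names as in the test):
SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con1_ready WAIT_FOR con1_go";
INSERT INTO t1 VALUES (1, REPEAT("x", 4100));  -- blocks at the sync point, checkpoint stays pending
-- from another connection: wait, inspect binlogs, then release:
SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
SET DEBUG_SYNC= "now SIGNAL con1_go";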
diff --git a/mysql-test/suite/binlog/r/binlog_index.result b/mysql-test/suite/binlog/r/binlog_index.result
index ca92cc5398b..8cdca861737 100644
--- a/mysql-test/suite/binlog/r/binlog_index.result
+++ b/mysql-test/suite/binlog/r/binlog_index.result
@@ -134,7 +134,7 @@ master-bin.000011
# fault_injection_registering_index
SET SESSION debug_dbug="+d,fault_injection_registering_index";
flush logs;
-ERROR HY000: Can't open file: 'master-bin.000012' (errno: 1)
+ERROR HY000: Can't open file: 'master-bin.000012' (errno: 1 "Operation not permitted")
SET @index=LOAD_FILE('MYSQLTEST_VARDIR/mysqld.1/data//master-bin.index');
SELECT @index;
@index
@@ -159,7 +159,7 @@ master-bin.000012
# fault_injection_updating_index
SET SESSION debug_dbug="+d,fault_injection_updating_index";
flush logs;
-ERROR HY000: Can't open file: 'master-bin.000013' (errno: 1)
+ERROR HY000: Can't open file: 'master-bin.000013' (errno: 1 "Operation not permitted")
SET @index=LOAD_FILE('MYSQLTEST_VARDIR/mysqld.1/data//master-bin.index');
SELECT @index;
@index
diff --git a/mysql-test/suite/binlog/r/binlog_ioerr.result b/mysql-test/suite/binlog/r/binlog_ioerr.result
index 9dd927b9299..68ff5264aa3 100644
--- a/mysql-test/suite/binlog/r/binlog_ioerr.result
+++ b/mysql-test/suite/binlog/r/binlog_ioerr.result
@@ -4,9 +4,9 @@ CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=innodb;
INSERT INTO t1 VALUES(0);
SET SESSION debug_dbug='+d,fail_binlog_write_1';
INSERT INTO t1 VALUES(1);
-ERROR HY000: Error writing file 'master-bin' (errno: 28)
+ERROR HY000: Error writing file 'master-bin' (errno: 28 "No space left on device")
INSERT INTO t1 VALUES(2);
-ERROR HY000: Error writing file 'master-bin' (errno: 28)
+ERROR HY000: Error writing file 'master-bin' (errno: 28 "No space left on device")
SET SESSION debug_dbug='';
INSERT INTO t1 VALUES(3);
SELECT * FROM t1;
@@ -16,6 +16,7 @@ a
SHOW BINLOG EVENTS;
Log_name Pos Event_type Server_id End_log_pos Info
BINLOG POS Format_desc 1 ENDPOS Server ver: #, Binlog ver: #
+BINLOG POS Binlog_checkpoint 1 ENDPOS master-bin.000001
BINLOG POS Query 1 ENDPOS use `test`; CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=innodb
BINLOG POS Query 1 ENDPOS BEGIN
BINLOG POS Query 1 ENDPOS use `test`; INSERT INTO t1 VALUES(0)
diff --git a/mysql-test/suite/binlog/r/binlog_mdev342.result b/mysql-test/suite/binlog/r/binlog_mdev342.result
index 0e1d8f8ac78..6ec6dcd783b 100644
--- a/mysql-test/suite/binlog/r/binlog_mdev342.result
+++ b/mysql-test/suite/binlog/r/binlog_mdev342.result
@@ -21,6 +21,7 @@ master-bin.000002 #
show binlog events in 'master-bin.000001' from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+master-bin.000001 # Binlog_checkpoint # # master-bin.000001
master-bin.000001 # Query # # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb
master-bin.000001 # Query # # BEGIN
master-bin.000001 # Table_map # # table_id: # (test.t1)
diff --git a/mysql-test/suite/binlog/r/binlog_mysqlbinlog2.result b/mysql-test/suite/binlog/r/binlog_mysqlbinlog2.result
index 806cf74479e..bf65bab602d 100644
--- a/mysql-test/suite/binlog/r/binlog_mysqlbinlog2.result
+++ b/mysql-test/suite/binlog/r/binlog_mysqlbinlog2.result
@@ -697,7 +697,6 @@ SET @@session.lc_time_names=0/*!*/;
SET @@session.collation_database=DEFAULT/*!*/;
BEGIN
/*!*/;
-SET INSERT_ID=6/*!*/;
DELIMITER ;
# End of log file
ROLLBACK /* added by mysqlbinlog */;
@@ -1483,17 +1482,6 @@ COMMIT
/*!*/;
DELIMITER ;
DELIMITER /*!*/;
-SET TIMESTAMP=1579609943/*!*/;
-SET @@session.pseudo_thread_id=999999999/*!*/;
-SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=0, @@session.unique_checks=1, @@session.autocommit=1/*!*/;
-SET @@session.sql_mode=0/*!*/;
-SET @@session.auto_increment_increment=1, @@session.auto_increment_offset=1/*!*/;
-/*!\C latin1 *//*!*/;
-SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=8/*!*/;
-SET @@session.lc_time_names=0/*!*/;
-SET @@session.collation_database=DEFAULT/*!*/;
-BEGIN
-/*!*/;
DELIMITER ;
# End of log file
ROLLBACK /* added by mysqlbinlog */;
diff --git a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row.result b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row.result
index 84a6d6c3baf..8f481b25251 100644
--- a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row.result
+++ b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row.result
@@ -335,6 +335,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id 1 end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id 1 end_log_pos # Binlog checkpoint master-bin.000001
+# at #
#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
use `test`/*!*/;
SET TIMESTAMP=1000000000/*!*/;
diff --git a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_innodb.result b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_innodb.result
index 31d7ee0df98..3227ad3d661 100644
--- a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_innodb.result
+++ b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_innodb.result
@@ -2252,6 +2252,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id 1 end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id 1 end_log_pos # Binlog checkpoint master-bin.000001
+# at #
#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
use `test`/*!*/;
SET TIMESTAMP=1000000000/*!*/;
@@ -3875,6 +3877,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id 1 end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id 1 end_log_pos # Binlog checkpoint master-bin.000001
+# at #
#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
use `test`/*!*/;
SET TIMESTAMP=1000000000/*!*/;
@@ -4242,6 +4246,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id 1 end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id 1 end_log_pos # Binlog checkpoint master-bin.000001
+# at #
#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
use `test`/*!*/;
SET TIMESTAMP=1000000000/*!*/;
@@ -4803,6 +4809,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id 1 end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id 1 end_log_pos # Binlog checkpoint master-bin.000001
+# at #
#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
use `test`/*!*/;
SET TIMESTAMP=1000000000/*!*/;
diff --git a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_myisam.result b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_myisam.result
index c268d20c87d..8338e48459a 100644
--- a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_myisam.result
+++ b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_myisam.result
@@ -2252,6 +2252,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id 1 end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id 1 end_log_pos # Binlog checkpoint master-bin.000001
+# at #
#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
use `test`/*!*/;
SET TIMESTAMP=1000000000/*!*/;
@@ -3897,6 +3899,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id 1 end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id 1 end_log_pos # Binlog checkpoint master-bin.000001
+# at #
#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
use `test`/*!*/;
SET TIMESTAMP=1000000000/*!*/;
@@ -4270,6 +4274,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id 1 end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id 1 end_log_pos # Binlog checkpoint master-bin.000001
+# at #
#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
use `test`/*!*/;
SET TIMESTAMP=1000000000/*!*/;
@@ -4841,6 +4847,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id 1 end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id 1 end_log_pos # Binlog checkpoint master-bin.000001
+# at #
#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
use `test`/*!*/;
SET TIMESTAMP=1000000000/*!*/;
diff --git a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_trans.result b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_trans.result
index 5d08a1095a9..f2f1ed00431 100644
--- a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_trans.result
+++ b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_trans.result
@@ -131,6 +131,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id 1 end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id 1 end_log_pos # Binlog checkpoint master-bin.000001
+# at #
#010909 4:46:40 server id 1 end_log_pos # Query thread_id=# exec_time=# error_code=0
use `test`/*!*/;
SET TIMESTAMP=1000000000/*!*/;
diff --git a/mysql-test/suite/binlog/r/binlog_row_annotate.result b/mysql-test/suite/binlog/r/binlog_row_annotate.result
index a305f39fb16..8b9ccee7a93 100644
--- a/mysql-test/suite/binlog/r/binlog_row_annotate.result
+++ b/mysql-test/suite/binlog/r/binlog_row_annotate.result
@@ -8,6 +8,7 @@
#####################################################################################
show binlog events in 'master-bin.000001' from <start_pos>;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Binlog_checkpoint 1 # master-bin.000001
master-bin.000001 # Query 1 # DROP DATABASE IF EXISTS test1
master-bin.000001 # Query 1 # DROP DATABASE IF EXISTS test2
master-bin.000001 # Query 1 # DROP DATABASE IF EXISTS test3
@@ -67,6 +68,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id # end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id # end_log_pos # Binlog checkpoint master-bin.000001
+# at #
#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;
SET @@session.pseudo_thread_id=#/*!*/;
@@ -293,6 +296,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id # end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id # end_log_pos # Binlog checkpoint master-bin.000001
+# at #
#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;
SET @@session.pseudo_thread_id=#/*!*/;
@@ -437,6 +442,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id # end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id # end_log_pos # Binlog checkpoint master-bin.000001
+# at #
#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;
SET @@session.pseudo_thread_id=#/*!*/;
@@ -653,6 +660,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id # end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id # end_log_pos # Binlog checkpoint master-bin.000001
+# at #
#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;
SET @@session.pseudo_thread_id=#/*!*/;
@@ -879,6 +888,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id # end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id # end_log_pos # Binlog checkpoint master-bin.000001
+# at #
#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;
SET @@session.pseudo_thread_id=#/*!*/;
@@ -1023,6 +1034,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id # end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id # end_log_pos # Binlog checkpoint master-bin.000001
+# at #
#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;
SET @@session.pseudo_thread_id=#/*!*/;
diff --git a/mysql-test/suite/binlog/r/binlog_row_binlog.result b/mysql-test/suite/binlog/r/binlog_row_binlog.result
index a3ee21b9957..441cfd81a91 100644
--- a/mysql-test/suite/binlog/r/binlog_row_binlog.result
+++ b/mysql-test/suite/binlog/r/binlog_row_binlog.result
@@ -234,6 +234,7 @@ master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Rotate # # master-bin.000002;pos=4
show binlog events in 'master-bin.000002' from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000002 # Binlog_checkpoint # # master-bin.000002
master-bin.000002 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
set @ac = @@autocommit;
set autocommit= 0;
@@ -751,6 +752,7 @@ BINLOG '
SHOW BINLOG EVENTS;
Log_name Pos Event_type Server_id End_log_pos Info
# # Format_desc 1 # Server ver: #, Binlog ver: #
+# # Binlog_checkpoint 1 # master-bin.000001
# # Query 1 # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY)
# # Query 1 # BEGIN
# # Table_map 1 # table_id: # (test.t1)
diff --git a/mysql-test/suite/binlog/r/binlog_row_mysqlbinlog_options.result b/mysql-test/suite/binlog/r/binlog_row_mysqlbinlog_options.result
index 08919db5ed1..f2dedaa633a 100644
--- a/mysql-test/suite/binlog/r/binlog_row_mysqlbinlog_options.result
+++ b/mysql-test/suite/binlog/r/binlog_row_mysqlbinlog_options.result
@@ -35,6 +35,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id # end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id # end_log_pos # Binlog checkpoint master-bin.000001
+# at #
use `new_test1`/*!*/;
#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;
@@ -229,6 +231,8 @@ DELIMITER /*!*/;
#010909 4:46:40 server id # end_log_pos # Start: binlog v 4, server v #.##.## created 010909 4:46:40 at startup
ROLLBACK/*!*/;
# at #
+#010909 4:46:40 server id # end_log_pos # Binlog checkpoint master-bin.000001
+# at #
use `new_test1`/*!*/;
#010909 4:46:40 server id # end_log_pos # Query thread_id=# exec_time=# error_code=0
SET TIMESTAMP=1000000000/*!*/;
diff --git a/mysql-test/suite/binlog/r/binlog_server_id.result b/mysql-test/suite/binlog/r/binlog_server_id.result
index f7d778a288b..991fd6e476b 100644
--- a/mysql-test/suite/binlog/r/binlog_server_id.result
+++ b/mysql-test/suite/binlog/r/binlog_server_id.result
@@ -7,6 +7,7 @@ select @@server_id;
1
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Binlog_checkpoint 1 # master-bin.000001
master-bin.000001 # Query 1 # use `test`; DROP TABLE IF EXISTS `t1`,`t2`,`t3` /* generated by server */
master-bin.000001 # Query 1 # use `test`; create table t1 (a int)
set global server_id=2;
@@ -16,6 +17,7 @@ select @@server_id;
2
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Binlog_checkpoint 1 # master-bin.000001
master-bin.000001 # Query 1 # use `test`; DROP TABLE IF EXISTS `t1`,`t2`,`t3` /* generated by server */
master-bin.000001 # Query 1 # use `test`; create table t1 (a int)
master-bin.000001 # Query 2 # use `test`; create table t2 (b int)
@@ -26,6 +28,7 @@ select @@server_id;
3
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Binlog_checkpoint 1 # master-bin.000001
master-bin.000001 # Query 1 # use `test`; DROP TABLE IF EXISTS `t1`,`t2`,`t3` /* generated by server */
master-bin.000001 # Query 1 # use `test`; create table t1 (a int)
master-bin.000001 # Query 2 # use `test`; create table t2 (b int)
diff --git a/mysql-test/suite/binlog/r/binlog_stm_binlog.result b/mysql-test/suite/binlog/r/binlog_stm_binlog.result
index 68e76921ff3..9f3ffc2af31 100644
--- a/mysql-test/suite/binlog/r/binlog_stm_binlog.result
+++ b/mysql-test/suite/binlog/r/binlog_stm_binlog.result
@@ -145,6 +145,7 @@ master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Rotate # # master-bin.000002;pos=4
show binlog events in 'master-bin.000002' from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000002 # Binlog_checkpoint # # master-bin.000002
master-bin.000002 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
set @ac = @@autocommit;
set autocommit= 0;
@@ -559,6 +560,7 @@ BINLOG '
SHOW BINLOG EVENTS;
Log_name Pos Event_type Server_id End_log_pos Info
# # Format_desc 1 # Server ver: #, Binlog ver: #
+# # Binlog_checkpoint 1 # master-bin.000001
# # Query 1 # use `test`; CREATE TABLE t1 (a INT PRIMARY KEY)
# # Query 1 # BEGIN
# # Query 1 # use `test`; INSERT INTO t1 VALUES (1)
diff --git a/mysql-test/suite/binlog/r/binlog_stm_blackhole.result b/mysql-test/suite/binlog/r/binlog_stm_blackhole.result
index e76f6b494f9..2b5bd76a338 100644
--- a/mysql-test/suite/binlog/r/binlog_stm_blackhole.result
+++ b/mysql-test/suite/binlog/r/binlog_stm_blackhole.result
@@ -175,6 +175,7 @@ set insert_id= 5;
insert into t1 values (55), (NULL);
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Binlog_checkpoint 1 # master-bin.000001
master-bin.000001 # Query 1 # use `test`; create table t1 (a int auto_increment, primary key (a)) engine=blackhole
master-bin.000001 # Query 1 # BEGIN
master-bin.000001 # Intvar 1 # INSERT_ID=1
diff --git a/mysql-test/suite/binlog/r/binlog_xa_recover.result b/mysql-test/suite/binlog/r/binlog_xa_recover.result
new file mode 100644
index 00000000000..4f57bd20690
--- /dev/null
+++ b/mysql-test/suite/binlog/r/binlog_xa_recover.result
@@ -0,0 +1,202 @@
+SET GLOBAL max_binlog_size= 4096;
+SET GLOBAL innodb_flush_log_at_trx_commit= 1;
+RESET MASTER;
+CREATE TABLE t1 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb;
+INSERT INTO t1 VALUES (100, REPEAT("x", 4100));
+INSERT INTO t1 VALUES (101, REPEAT("x", 4100));
+INSERT INTO t1 VALUES (102, REPEAT("x", 4100));
+SET DEBUG_SYNC= "ha_commit_trans_before_log_and_order SIGNAL con1_wait WAIT_FOR con1_cont";
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con1_ready WAIT_FOR _ever";
+INSERT INTO t1 VALUES (1, REPEAT("x", 4100));
+SET DEBUG_SYNC= "now WAIT_FOR con1_wait";
+SET DEBUG_SYNC= "ha_commit_trans_before_log_and_order SIGNAL con2_wait WAIT_FOR con2_cont";
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con2_ready WAIT_FOR _ever";
+INSERT INTO t1 VALUES (2, NULL);
+SET DEBUG_SYNC= "now WAIT_FOR con2_wait";
+SET DEBUG_SYNC= "ha_commit_trans_before_log_and_order SIGNAL con3_wait WAIT_FOR con3_cont";
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con3_ready WAIT_FOR _ever";
+INSERT INTO t1 VALUES (3, REPEAT("x", 4100));
+SET DEBUG_SYNC= "now WAIT_FOR con3_wait";
+SET DEBUG_SYNC= "ha_commit_trans_before_log_and_order SIGNAL con4_wait WAIT_FOR con4_cont";
+SET SESSION debug_dbug="+d,crash_commit_after_log";
+INSERT INTO t1 VALUES (4, NULL);
+SET DEBUG_SYNC= "now WAIT_FOR con4_wait";
+SET DEBUG_SYNC= "now SIGNAL con1_cont";
+SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
+SET DEBUG_SYNC= "now SIGNAL con2_cont";
+SET DEBUG_SYNC= "now WAIT_FOR con2_ready";
+SET DEBUG_SYNC= "now SIGNAL con3_cont";
+SET DEBUG_SYNC= "now WAIT_FOR con3_ready";
+show binary logs;
+Log_name File_size
+master-bin.000001 #
+master-bin.000002 #
+master-bin.000003 #
+master-bin.000004 #
+master-bin.000005 #
+master-bin.000006 #
+show binlog events in 'master-bin.000003' from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000003 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+master-bin.000003 # Binlog_checkpoint # # master-bin.000002
+master-bin.000003 # Binlog_checkpoint # # master-bin.000003
+master-bin.000003 # Query # # BEGIN
+master-bin.000003 # Table_map # # table_id: # (test.t1)
+master-bin.000003 # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.000003 # Xid # # COMMIT /* XID */
+master-bin.000003 # Rotate # # master-bin.00000<binlog_start>;pos=<binlog_start>
+show binlog events in 'master-bin.00000<binlog_start>' from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.00000<binlog_start> # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+master-bin.00000<binlog_start> # Binlog_checkpoint # # master-bin.000003
+master-bin.00000<binlog_start> # Binlog_checkpoint # # master-bin.00000<binlog_start>
+master-bin.00000<binlog_start> # Query # # BEGIN
+master-bin.00000<binlog_start> # Table_map # # table_id: # (test.t1)
+master-bin.00000<binlog_start> # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.00000<binlog_start> # Xid # # COMMIT /* XID */
+master-bin.00000<binlog_start> # Rotate # # master-bin.000005;pos=<binlog_start>
+show binlog events in 'master-bin.000005' from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000005 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+master-bin.000005 # Binlog_checkpoint # # master-bin.00000<binlog_start>
+master-bin.000005 # Query # # BEGIN
+master-bin.000005 # Table_map # # table_id: # (test.t1)
+master-bin.000005 # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.000005 # Xid # # COMMIT /* XID */
+master-bin.000005 # Query # # BEGIN
+master-bin.000005 # Table_map # # table_id: # (test.t1)
+master-bin.000005 # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.000005 # Xid # # COMMIT /* XID */
+master-bin.000005 # Rotate # # master-bin.000006;pos=<binlog_start>
+show binlog events in 'master-bin.000006' from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000006 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+master-bin.000006 # Binlog_checkpoint # # master-bin.00000<binlog_start>
+PURGE BINARY LOGS TO "master-bin.000006";
+show binary logs;
+Log_name File_size
+master-bin.000004 #
+master-bin.000005 #
+master-bin.000006 #
+SET DEBUG_SYNC= "now SIGNAL con4_cont";
+Got one of the listed errors
+SELECT a FROM t1 ORDER BY a;
+a
+1
+2
+3
+4
+100
+101
+102
+Test that with multiple binlog checkpoints, recovery starts from the last one.
+SET GLOBAL max_binlog_size= 4096;
+SET GLOBAL innodb_flush_log_at_trx_commit= 1;
+RESET MASTER;
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con10_ready WAIT_FOR con10_cont";
+INSERT INTO t1 VALUES (10, REPEAT("x", 4100));
+SET DEBUG_SYNC= "now WAIT_FOR con10_ready";
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con11_ready WAIT_FOR con11_cont";
+INSERT INTO t1 VALUES (11, REPEAT("x", 4100));
+SET DEBUG_SYNC= "now WAIT_FOR con11_ready";
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con12_ready WAIT_FOR con12_cont";
+INSERT INTO t1 VALUES (12, REPEAT("x", 4100));
+SET DEBUG_SYNC= "now WAIT_FOR con12_ready";
+INSERT INTO t1 VALUES (13, NULL);
+show binary logs;
+Log_name File_size
+master-bin.000001 #
+master-bin.000002 #
+master-bin.000003 #
+master-bin.000004 #
+show binlog events in 'master-bin.00000<binlog_start>' from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.00000<binlog_start> # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+master-bin.00000<binlog_start> # Binlog_checkpoint # # master-bin.000001
+master-bin.00000<binlog_start> # Query # # BEGIN
+master-bin.00000<binlog_start> # Table_map # # table_id: # (test.t1)
+master-bin.00000<binlog_start> # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.00000<binlog_start> # Xid # # COMMIT /* XID */
+SET DEBUG_SYNC= "now SIGNAL con10_cont";
+SET @old_dbug= @@global.DEBUG_DBUG;
+SET GLOBAL debug_dbug="+d,binlog_background_checkpoint_processed";
+SET DEBUG_SYNC= "now SIGNAL con12_cont";
+SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed";
+SET GLOBAL debug_dbug= @old_dbug;
+SET DEBUG_SYNC= "now SIGNAL con11_cont";
+Checking that master-bin.000004 is the last binlog checkpoint
+show binlog events in 'master-bin.00000<binlog_start>' from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.00000<binlog_start> # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+master-bin.00000<binlog_start> # Binlog_checkpoint # # master-bin.000001
+master-bin.00000<binlog_start> # Query # # BEGIN
+master-bin.00000<binlog_start> # Table_map # # table_id: # (test.t1)
+master-bin.00000<binlog_start> # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.00000<binlog_start> # Xid # # COMMIT /* XID */
+master-bin.00000<binlog_start> # Binlog_checkpoint # # master-bin.000002
+master-bin.00000<binlog_start> # Binlog_checkpoint # # master-bin.00000<binlog_start>
+Now crash the server
+SET SESSION debug_dbug="+d,crash_commit_after_log";
+INSERT INTO t1 VALUES (14, NULL);
+Got one of the listed errors
+SELECT a FROM t1 ORDER BY a;
+a
+1
+2
+3
+4
+10
+11
+12
+13
+14
+100
+101
+102
+*** Check that recovery works if we crashed early during rotate, before
+*** binlog checkpoint event could be written.
+SET GLOBAL max_binlog_size= 4096;
+SET GLOBAL innodb_flush_log_at_trx_commit= 1;
+RESET MASTER;
+INSERT INTO t1 VALUES (21, REPEAT("x", 4100));
+INSERT INTO t1 VALUES (22, REPEAT("x", 4100));
+INSERT INTO t1 VALUES (23, REPEAT("x", 4100));
+SET SESSION debug_dbug="+d,crash_before_write_checkpoint_event";
+INSERT INTO t1 VALUES (24, REPEAT("x", 4100));
+Got one of the listed errors
+SELECT a FROM t1 ORDER BY a;
+a
+1
+2
+3
+4
+10
+11
+12
+13
+14
+21
+22
+23
+24
+100
+101
+102
+show binary logs;
+Log_name File_size
+master-bin.000001 #
+master-bin.000002 #
+master-bin.000003 #
+master-bin.000004 #
+master-bin.000005 #
+show binlog events in 'master-bin.00000<binlog_start>' from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.00000<binlog_start> # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+master-bin.00000<binlog_start> # Binlog_checkpoint # # master-bin.000003
+master-bin.00000<binlog_start> # Binlog_checkpoint # # master-bin.00000<binlog_start>
+master-bin.00000<binlog_start> # Query # # BEGIN
+master-bin.00000<binlog_start> # Table_map # # table_id: # (test.t1)
+master-bin.00000<binlog_start> # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.00000<binlog_start> # Xid # # COMMIT /* XID */
+master-bin.00000<binlog_start> # Rotate # # master-bin.000005;pos=<binlog_start>
+DROP TABLE t1;
diff --git a/mysql-test/suite/binlog/t/binlog_checkpoint.test b/mysql-test/suite/binlog/t/binlog_checkpoint.test
new file mode 100644
index 00000000000..8c84e51c4df
--- /dev/null
+++ b/mysql-test/suite/binlog/t/binlog_checkpoint.test
@@ -0,0 +1,120 @@
+--source include/have_innodb.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
+--source include/have_binlog_format_row.inc
+
+SET @old_max_binlog_size= @@global.max_binlog_size;
+SET GLOBAL max_binlog_size= 4096;
+SET @old_innodb_flush_log_at_trx_commit= @@global.innodb_flush_log_at_trx_commit;
+SET GLOBAL innodb_flush_log_at_trx_commit= 1;
+RESET MASTER;
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb;
+CREATE TABLE t2 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Myisam;
+
+--echo *** Test that RESET MASTER waits for pending commit checkpoints to complete.
+
+# con1 will hang before doing its commit checkpoint, blocking RESET MASTER.
+connect(con1,localhost,root,,);
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con1_ready WAIT_FOR con1_go";
+send INSERT INTO t1 VALUES (1, REPEAT("x", 4100));
+
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
+# Let's add a few binlog rotations just for good measure.
+INSERT INTO t2 VALUES (1, REPEAT("x", 4100));
+INSERT INTO t2 VALUES (2, REPEAT("x", 4100));
+--source include/show_binary_logs.inc
+--let $binlog_file= master-bin.000004
+--let $binlog_start= 4
+--source include/show_binlog_events.inc
+SET DEBUG_SYNC= "execute_command_after_close_tables SIGNAL reset_master_done";
+send RESET MASTER;
+
+connect(con2,localhost,root,,);
+--echo This will timeout, as RESET MASTER is blocked
+SET DEBUG_SYNC= "now WAIT_FOR reset_master_done TIMEOUT 1";
+# Wake up transaction to allow RESET MASTER to complete.
+SET DEBUG_SYNC= "now SIGNAL con1_go";
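+# (A DEBUG_SYNC WAIT_FOR that reaches its TIMEOUT completes with a warning
+# instead of failing the test, which is how the timed-out wait above
+# demonstrates that RESET MASTER is still blocked at this point.)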
+
+connection con1;
+reap;
+
+connection default;
+reap;
+--source include/show_binary_logs.inc
+--let $binlog_file= master-bin.000001
+--let $binlog_start= 4
+--source include/show_binlog_events.inc
+
+--echo *** Test that binlog N is active, and commit checkpoint for (N-1) is
+--echo *** done while there is still a pending commit checkpoint for (N-2).
+
+connection con1;
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con1_ready WAIT_FOR con1_continue";
+send INSERT INTO t1 VALUES (20, REPEAT("x", 4100));
+
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
+
+connection con2;
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con2_ready WAIT_FOR con2_continue";
+send INSERT INTO t1 VALUES (21, REPEAT("x", 4100));
+
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con2_ready";
+--source include/show_binary_logs.inc
+--let $binlog_file= master-bin.000001
+--source include/show_binlog_events.inc
+--let $binlog_file= master-bin.000002
+--source include/show_binlog_events.inc
+--let $binlog_file= master-bin.000003
+--source include/show_binlog_events.inc
+
+# We need to sync the test case with the background processing of the
+# commit checkpoint, otherwise we get nondeterministic results.
+SET DEBUG_SYNC= "RESET";
+SET @old_dbug= @@global.DEBUG_DBUG;
+SET GLOBAL debug_dbug="+d,binlog_background_checkpoint_processed";
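+# (With this debug key set, the binlog background thread raises a DEBUG_SYNC
+# signal of the same name once it has processed a checkpoint request; the
+# WAIT_FOR further down keys on that signal.)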
+
+SET DEBUG_SYNC= "now SIGNAL con2_continue";
+
+connection con2;
+reap;
+
+connection default;
+--echo con1 is still pending, no new binlog checkpoint should have been logged.
+# Make sure the commit checkpoint is processed before we check that no
+# checkpoint event has been binlogged.
+SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed";
+SET GLOBAL debug_dbug= @old_dbug;
+SET DEBUG_SYNC= "RESET";
+
+--let $binlog_file= master-bin.000003
+--source include/show_binlog_events.inc
+
+SET DEBUG_SYNC= "now SIGNAL con1_continue";
+
+connection con1;
+reap;
+
+connection default;
+
+--echo No commit checkpoints are pending, a new binlog checkpoint should have been logged.
+--let $binlog_file= master-bin.000003
+
+# Wait for the master-bin.000003 binlog checkpoint to appear.
+--let $wait_for_all= 0
+--let $show_statement= SHOW BINLOG EVENTS IN "$binlog_file"
+--let $field= Info
+--let $condition= = "master-bin.000003"
+--source include/wait_show_condition.inc
+
+--source include/show_binlog_events.inc
+
+
+# Cleanup
+connection default;
+DROP TABLE t1, t2;
+SET GLOBAL max_binlog_size= @old_max_binlog_size;
+SET GLOBAL innodb_flush_log_at_trx_commit= @old_innodb_flush_log_at_trx_commit;
diff --git a/mysql-test/suite/binlog/t/binlog_killed.test b/mysql-test/suite/binlog/t/binlog_killed.test
index 9b6420df4b4..ff558ee2948 100644
--- a/mysql-test/suite/binlog/t/binlog_killed.test
+++ b/mysql-test/suite/binlog/t/binlog_killed.test
@@ -59,8 +59,8 @@ reap;
let $rows= `select count(*) from t2 /* must be 2 or 0 */`;
let $MYSQLD_DATADIR= `select @@datadir`;
---let $binlog_killed_pos=query_get_value(SHOW BINLOG EVENTS, Pos, 4)
---let $binlog_killed_end_log_pos=query_get_value(SHOW BINLOG EVENTS, End_log_pos, 4)
+--let $binlog_killed_pos=query_get_value(SHOW BINLOG EVENTS, Pos, 5)
+--let $binlog_killed_end_log_pos=query_get_value(SHOW BINLOG EVENTS, End_log_pos, 5)
--exec $MYSQL_BINLOG --force-if-open --start-position=$binlog_killed_pos --stop-position=$binlog_killed_end_log_pos $MYSQLD_DATADIR/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/kill_query_calling_sp.binlog
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--disable_result_log
@@ -270,8 +270,8 @@ select * from t4 order by b /* must be (1,1), (1,2) */;
select @b /* must be 1 at the end of a stmt calling bug27563() */;
--echo must have the update query event on the 4th line
source include/show_binlog_events.inc;
---let $binlog_killed_pos= query_get_value(SHOW BINLOG EVENTS, Pos, 4)
---let $binlog_killed_end_log_pos= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 4)
+--let $binlog_killed_pos= query_get_value(SHOW BINLOG EVENTS, Pos, 5)
+--let $binlog_killed_end_log_pos= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 5)
--echo *** a proof the query is binlogged with an error ***
@@ -318,8 +318,8 @@ select count(*) from t4 /* must be 1 */;
select @b /* must be 1 at the end of a stmt calling bug27563() */;
--echo must have the delete query event on the 4th line
source include/show_binlog_events.inc;
---let $binlog_killed_pos= query_get_value(SHOW BINLOG EVENTS, Pos, 4)
---let $binlog_killed_end_log_pos= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 4)
+--let $binlog_killed_pos= query_get_value(SHOW BINLOG EVENTS, Pos, 5)
+--let $binlog_killed_end_log_pos= query_get_value(SHOW BINLOG EVENTS, End_log_pos, 5)
# a proof the query is binlogged with an error
diff --git a/mysql-test/suite/binlog/t/binlog_killed_simulate.test b/mysql-test/suite/binlog/t/binlog_killed_simulate.test
index ba111fd0145..33037710379 100644
--- a/mysql-test/suite/binlog/t/binlog_killed_simulate.test
+++ b/mysql-test/suite/binlog/t/binlog_killed_simulate.test
@@ -50,8 +50,8 @@ load data infile '../../std_data/rpl_loaddata.dat' into table t2 /* will be "kil
# a proof the query is binlogged with an error
---let $binlog_load_data= query_get_value(SHOW BINLOG EVENTS, Pos, 3)
---let $binlog_end= query_get_value(SHOW BINLOG EVENTS, Pos, 4)
+--let $binlog_load_data= query_get_value(SHOW BINLOG EVENTS, Pos, 4)
+--let $binlog_end= query_get_value(SHOW BINLOG EVENTS, Pos, 5)
source include/show_binlog_events.inc;
--exec $MYSQL_BINLOG --force-if-open --start-position=$binlog_load_data --stop-position=$binlog_end $MYSQLD_DATADIR/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/binlog_killed_bug27571.binlog
diff --git a/mysql-test/suite/binlog/t/binlog_mysqlbinlog2.test b/mysql-test/suite/binlog/t/binlog_mysqlbinlog2.test
index 740c4078f20..986e180db13 100644
--- a/mysql-test/suite/binlog/t/binlog_mysqlbinlog2.test
+++ b/mysql-test/suite/binlog/t/binlog_mysqlbinlog2.test
@@ -23,7 +23,7 @@ insert into t1 values(null, "b");
set timestamp=@a+2;
--let $binlog_pos_760=query_get_value(SHOW MASTER STATUS, Position, 1)
insert into t1 values(null, "c");
---let $binlog_pos_951=query_get_value(SHOW BINLOG EVENTS in 'master-bin.000001' from $binlog_pos_760, Pos, 4)
+--let $binlog_pos_951=query_get_value(SHOW BINLOG EVENTS in 'master-bin.000001' from $binlog_pos_760, Pos, 5)
set timestamp=@a+4;
insert into t1 values(null, "d");
insert into t1 values(null, "e");
@@ -31,8 +31,8 @@ insert into t1 values(null, "e");
flush logs;
set timestamp=@a+1; # this could happen on a slave
insert into t1 values(null, "f");
---let $binlog_pos_135=query_get_value(SHOW BINLOG EVENTS in 'master-bin.000002', Pos, 3)
---let $binlog_pos_203=query_get_value(SHOW BINLOG EVENTS in 'master-bin.000002', Pos, 4)
+--let $binlog_pos_135=query_get_value(SHOW BINLOG EVENTS in 'master-bin.000002', Pos, 4)
+--let $binlog_pos_203=query_get_value(SHOW BINLOG EVENTS in 'master-bin.000002', Pos, 5)
# delimiters are for easier debugging in future
@@ -50,22 +50,22 @@ let $MYSQLD_DATADIR= `select @@datadir`;
--disable_query_log
select "--- offset --" as "";
--enable_query_log
---exec $MYSQL_BINLOG --short-form --offset=2 $MYSQLD_DATADIR/master-bin.000001
+--exec $MYSQL_BINLOG --short-form --offset=3 $MYSQLD_DATADIR/master-bin.000001
--disable_query_log
select "--- start-position --" as "";
--enable_query_log
-let $start_pos= `select @binlog_start_pos + 653`;
+let $start_pos= `select @binlog_start_pos + 693`;
--exec $MYSQL_BINLOG --short-form --start-position=$start_pos $MYSQLD_DATADIR/master-bin.000001
--disable_query_log
select "--- stop-position --" as "";
--enable_query_log
-let $stop_pos= `select @binlog_start_pos + 653`;
+let $stop_pos= `select @binlog_start_pos + 693`;
--exec $MYSQL_BINLOG --short-form --stop-position=$stop_pos $MYSQLD_DATADIR/master-bin.000001
--disable_query_log
select "--- start and stop positions ---" as "";
--enable_query_log
-let $start_pos= `select @binlog_start_pos + 653`;
-let $stop_pos= `select @binlog_start_pos + 770`;
+let $start_pos= `select @binlog_start_pos + 693`;
+let $stop_pos= `select @binlog_start_pos + 810`;
--exec $MYSQL_BINLOG --short-form --start-position=$start_pos --stop-position=$stop_pos $MYSQLD_DATADIR/master-bin.000001
--disable_query_log
select "--- start-datetime --" as "";
@@ -88,16 +88,16 @@ flush logs;
--disable_query_log
select "--- offset --" as "";
--enable_query_log
---exec $MYSQL_BINLOG --short-form --offset=2 $MYSQLD_DATADIR/master-bin.000001 $MYSQLD_DATADIR/master-bin.000002
+--exec $MYSQL_BINLOG --short-form --offset=3 $MYSQLD_DATADIR/master-bin.000001 $MYSQLD_DATADIR/master-bin.000002
--disable_query_log
select "--- start-position --" as "";
--enable_query_log
-let $start_pos= `select @binlog_start_pos + 653`;
+let $start_pos= `select @binlog_start_pos + 693`;
--exec $MYSQL_BINLOG --short-form --start-position=$start_pos $MYSQLD_DATADIR/master-bin.000001 $MYSQLD_DATADIR/master-bin.000002
--disable_query_log
select "--- stop-position --" as "";
--enable_query_log
-let $stop_pos= `select @binlog_start_pos + 69`;
+let $stop_pos= `select @binlog_start_pos + 109`;
--exec $MYSQL_BINLOG --short-form --stop-position=$stop_pos $MYSQLD_DATADIR/master-bin.000001 $MYSQLD_DATADIR/master-bin.000002
--disable_query_log
select "--- start-datetime --" as "";
@@ -117,22 +117,22 @@ select "--- Remote --" as "";
--disable_query_log
select "--- offset --" as "";
--enable_query_log
---exec $MYSQL_BINLOG --short-form --offset=2 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001
+--exec $MYSQL_BINLOG --short-form --offset=3 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001
--disable_query_log
select "--- start-position --" as "";
--enable_query_log
-let $start_pos= `select @binlog_start_pos + 653`;
+let $start_pos= `select @binlog_start_pos + 693`;
--exec $MYSQL_BINLOG --short-form --start-position=$start_pos --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001
--disable_query_log
select "--- stop-position --" as "";
--enable_query_log
-let $stop_pos= `select @binlog_start_pos + 653`;
+let $stop_pos= `select @binlog_start_pos + 693`;
--exec $MYSQL_BINLOG --short-form --stop-position=$stop_pos --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001
--disable_query_log
select "--- start and stop positions ---" as "";
--enable_query_log
-let $start_pos= `select @binlog_start_pos + 653`;
-let $stop_pos= `select @binlog_start_pos + 770`;
+let $start_pos= `select @binlog_start_pos + 693`;
+let $stop_pos= `select @binlog_start_pos + 810`;
--exec $MYSQL_BINLOG --short-form --start-position=$start_pos --stop-position $stop_pos --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001
--disable_query_log
select "--- start-datetime --" as "";
@@ -152,16 +152,16 @@ select "--- Remote with 2 binlogs on command line --" as "";
--disable_query_log
select "--- offset --" as "";
--enable_query_log
---exec $MYSQL_BINLOG --short-form --offset=2 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002
+--exec $MYSQL_BINLOG --short-form --offset=3 --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002
--disable_query_log
select "--- start-position --" as "";
--enable_query_log
-let $start_pos= `select @binlog_start_pos + 653`;
+let $start_pos= `select @binlog_start_pos + 693`;
--exec $MYSQL_BINLOG --short-form --start-position=$start_pos --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002
--disable_query_log
select "--- stop-position --" as "";
--enable_query_log
-let $stop_pos= `select @binlog_start_pos + 28`;
+let $stop_pos= `select @binlog_start_pos + 68`;
--exec $MYSQL_BINLOG --short-form --stop-position=$stop_pos --read-from-remote-server --user=root --host=127.0.0.1 --port=$MASTER_MYPORT master-bin.000001 master-bin.000002
--disable_query_log
select "--- start-datetime --" as "";
diff --git a/mysql-test/suite/binlog/t/binlog_xa_recover-master.opt b/mysql-test/suite/binlog/t/binlog_xa_recover-master.opt
new file mode 100644
index 00000000000..3c44f9fad10
--- /dev/null
+++ b/mysql-test/suite/binlog/t/binlog_xa_recover-master.opt
@@ -0,0 +1 @@
+--skip-stack-trace --skip-core-file --loose-debug-dbug=+d,xa_recover_expect_master_bin_000004
diff --git a/mysql-test/suite/binlog/t/binlog_xa_recover.test b/mysql-test/suite/binlog/t/binlog_xa_recover.test
new file mode 100644
index 00000000000..e46857b265c
--- /dev/null
+++ b/mysql-test/suite/binlog/t/binlog_xa_recover.test
@@ -0,0 +1,277 @@
+--source include/have_innodb.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
+--source include/have_binlog_format_row.inc
+# Valgrind does not work well with a test that crashes the server
+--source include/not_valgrind.inc
+
+# (We do not need to restore these settings, as we crash the server).
+SET GLOBAL max_binlog_size= 4096;
+SET GLOBAL innodb_flush_log_at_trx_commit= 1;
+RESET MASTER;
+
+CREATE TABLE t1 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=Innodb;
+# Insert some data to force a few binlog rotations (3), so we get some
+# normal binlog checkpoints before starting the test.
+INSERT INTO t1 VALUES (100, REPEAT("x", 4100));
+# Wait for the master-bin.000002 binlog checkpoint to appear.
+--let $wait_for_all= 0
+--let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000002"
+--let $field= Info
+--let $condition= = "master-bin.000002"
+--source include/wait_show_condition.inc
+INSERT INTO t1 VALUES (101, REPEAT("x", 4100));
+--let $wait_for_all= 0
+--let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000003"
+--let $field= Info
+--let $condition= = "master-bin.000003"
+--source include/wait_show_condition.inc
+INSERT INTO t1 VALUES (102, REPEAT("x", 4100));
+--let $wait_for_all= 0
+--let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000004"
+--let $field= Info
+--let $condition= = "master-bin.000004"
+--source include/wait_show_condition.inc
+
+# Now start a bunch of transactions that span multiple binlog
+# files. Leave them in the prepared-but-not-committed state in the engine
+# and crash the server. Check that crash recovery is able to recover all
+# of them.
+#
+# We use debug_sync to get all the transactions into the prepared state before
+# we commit any of them. This is because the prepare step flushes the InnoDB
+# redo log - including any commits made before it, so recovery would become
+# unnecessary, decreasing the value of this test.
+#
+# We arrange to have con1 with a prepared transaction in master-bin.000004,
+# con2 and con3 each with a prepared transaction in master-bin.000005, and a new
+# empty master-bin.000006. So the latest binlog checkpoint should be
+# master-bin.000006.
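+#
+# A rough sketch of the handshake each of the connections below performs
+# (the sync point and signal names are the real ones used here; "conN"
+# stands for con1..con4):
+#
+#   connection conN;
+#   SET DEBUG_SYNC= "ha_commit_trans_before_log_and_order SIGNAL conN_wait WAIT_FOR conN_cont";
+#   send INSERT INTO t1 VALUES (...);          # parks inside commit
+#   connection default;
+#   SET DEBUG_SYNC= "now WAIT_FOR conN_wait";  # conN is now prepared
+#   SET DEBUG_SYNC= "now SIGNAL conN_cont";    # later: let conN continue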
+
+connect(con1,localhost,root,,);
+# First wait after prepare and before writing to the binlog.
+SET DEBUG_SYNC= "ha_commit_trans_before_log_and_order SIGNAL con1_wait WAIT_FOR con1_cont";
+# Then complete InnoDB commit in memory (but not commit checkpoint / write to
+# disk), and hang until crash, leaving a transaction to be XA recovered.
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con1_ready WAIT_FOR _ever";
+send INSERT INTO t1 VALUES (1, REPEAT("x", 4100));
+
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con1_wait";
+
+connect(con2,localhost,root,,);
+SET DEBUG_SYNC= "ha_commit_trans_before_log_and_order SIGNAL con2_wait WAIT_FOR con2_cont";
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con2_ready WAIT_FOR _ever";
+send INSERT INTO t1 VALUES (2, NULL);
+
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con2_wait";
+
+connect(con3,localhost,root,,);
+SET DEBUG_SYNC= "ha_commit_trans_before_log_and_order SIGNAL con3_wait WAIT_FOR con3_cont";
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con3_ready WAIT_FOR _ever";
+send INSERT INTO t1 VALUES (3, REPEAT("x", 4100));
+
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con3_wait";
+
+connect(con4,localhost,root,,);
+SET DEBUG_SYNC= "ha_commit_trans_before_log_and_order SIGNAL con4_wait WAIT_FOR con4_cont";
+SET SESSION debug_dbug="+d,crash_commit_after_log";
+send INSERT INTO t1 VALUES (4, NULL);
+
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con4_wait";
+
+SET DEBUG_SYNC= "now SIGNAL con1_cont";
+SET DEBUG_SYNC= "now WAIT_FOR con1_ready";
+SET DEBUG_SYNC= "now SIGNAL con2_cont";
+SET DEBUG_SYNC= "now WAIT_FOR con2_ready";
+SET DEBUG_SYNC= "now SIGNAL con3_cont";
+SET DEBUG_SYNC= "now WAIT_FOR con3_ready";
+
+# Check that everything is committed in binary log.
+--source include/show_binary_logs.inc
+--let $binlog_file= master-bin.000003
+--let $binlog_start= 4
+--source include/show_binlog_events.inc
+--let $binlog_file= master-bin.000004
+--source include/show_binlog_events.inc
+--let $binlog_file= master-bin.000005
+--source include/show_binlog_events.inc
+--let $binlog_file= master-bin.000006
+--source include/show_binlog_events.inc
+
+
+# Check that server will not purge too much.
+PURGE BINARY LOGS TO "master-bin.000006";
+--source include/show_binary_logs.inc
+
+# Now crash the server with one more transaction in prepared state.
+--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+wait-binlog_xa_recover.test
+EOF
+SET DEBUG_SYNC= "now SIGNAL con4_cont";
+connection con4;
+--error 2006,2013
+reap;
+
+--remove_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+restart-group_commit_binlog_pos.test
+EOF
+
+connection default;
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+
+# Check that all transactions are recovered.
+SELECT a FROM t1 ORDER BY a;
+
+--echo Test that with multiple binlog checkpoints, recovery starts from the last one.
+SET GLOBAL max_binlog_size= 4096;
+SET GLOBAL innodb_flush_log_at_trx_commit= 1;
+RESET MASTER;
+
+# Rotate to binlog master-bin.000003 while delaying binlog checkpoints.
+# So we get multiple binlog checkpoints in master-bin.000003.
+# Then complete the checkpoints, crash, and check that we only scan
+# the necessary binlog file (i.e. that we use the _last_ checkpoint).
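+#
+# (For hand-debugging, the accumulated checkpoint events can be listed with
+# something like the following, which is not executed by the test:
+#   SHOW BINLOG EVENTS IN 'master-bin.000003';
+# Several Binlog_checkpoint rows should show up in the Info column.)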
+
+connect(con10,localhost,root,,);
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con10_ready WAIT_FOR con10_cont";
+send INSERT INTO t1 VALUES (10, REPEAT("x", 4100));
+
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con10_ready";
+
+connect(con11,localhost,root,,);
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con11_ready WAIT_FOR con11_cont";
+send INSERT INTO t1 VALUES (11, REPEAT("x", 4100));
+
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con11_ready";
+
+connect(con12,localhost,root,,);
+SET DEBUG_SYNC= "commit_after_group_release_commit_ordered SIGNAL con12_ready WAIT_FOR con12_cont";
+send INSERT INTO t1 VALUES (12, REPEAT("x", 4100));
+
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR con12_ready";
+INSERT INTO t1 VALUES (13, NULL);
+
+--source include/show_binary_logs.inc
+--let $binlog_file= master-bin.000004
+--let $binlog_start= 4
+--source include/show_binlog_events.inc
+
+SET DEBUG_SYNC= "now SIGNAL con10_cont";
+connection con10;
+reap;
+connection default;
+
+# We need to sync the test case with the background processing of the
+# commit checkpoint, otherwise we get nondeterministic results.
+SET @old_dbug= @@global.DEBUG_DBUG;
+SET GLOBAL debug_dbug="+d,binlog_background_checkpoint_processed";
+
+SET DEBUG_SYNC= "now SIGNAL con12_cont";
+connection con12;
+reap;
+connection default;
+SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed";
+SET GLOBAL debug_dbug= @old_dbug;
+
+SET DEBUG_SYNC= "now SIGNAL con11_cont";
+connection con11;
+reap;
+
+connection default;
+# Wait for the last (master-bin.000004) binlog checkpoint to appear.
+--let $wait_for_all= 0
+--let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000004"
+--let $field= Info
+--let $condition= = "master-bin.000004"
+--source include/wait_show_condition.inc
+
+--echo Checking that master-bin.000004 is the last binlog checkpoint
+--source include/show_binlog_events.inc
+
+--echo Now crash the server
+# It is not too easy to test XA recovery, as it runs early during server
+# startup, before any connections can be made.
+# What we do is set a DBUG error insert which will crash if XA recovery
+# starts from any binlog other than master-bin.000004 (see the file
+# binlog_xa_recover-master.opt). Then we will fail here if XA recovery
+# would start from the wrong place.
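+# (For reference, binlog_xa_recover-master.opt passes
+#   --loose-debug-dbug=+d,xa_recover_expect_master_bin_000004
+# so it is the restarted server itself that crashes if recovery would scan
+# from any other file.)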
+--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+wait-binlog_xa_recover.test
+EOF
+SET SESSION debug_dbug="+d,crash_commit_after_log";
+--error 2006,2013
+INSERT INTO t1 VALUES (14, NULL);
+
+--remove_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+restart-group_commit_binlog_pos.test
+EOF
+
+connection default;
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+
+# Check that all transactions are recovered.
+SELECT a FROM t1 ORDER BY a;
+
+
+--echo *** Check that recovery works if we crashed early during rotate, before
+--echo *** binlog checkpoint event could be written.
+
+SET GLOBAL max_binlog_size= 4096;
+SET GLOBAL innodb_flush_log_at_trx_commit= 1;
+RESET MASTER;
+
+# We need some initial data to reach binlog master-bin.000004. Otherwise
+# crash recovery fails due to the error insert used for the previous test.
+INSERT INTO t1 VALUES (21, REPEAT("x", 4100));
+INSERT INTO t1 VALUES (22, REPEAT("x", 4100));
+# Wait for the master-bin.000003 binlog checkpoint to appear.
+--let $wait_for_all= 0
+--let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000003"
+--let $field= Info
+--let $condition= = "master-bin.000003"
+--source include/wait_show_condition.inc
+INSERT INTO t1 VALUES (23, REPEAT("x", 4100));
+# Wait for the last (master-bin.000004) binlog checkpoint to appear.
+--let $wait_for_all= 0
+--let $show_statement= SHOW BINLOG EVENTS IN "master-bin.000004"
+--let $field= Info
+--let $condition= = "master-bin.000004"
+--source include/wait_show_condition.inc
+
+--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+wait-binlog_xa_recover.test
+EOF
+SET SESSION debug_dbug="+d,crash_before_write_checkpoint_event";
+--error 2006,2013
+INSERT INTO t1 VALUES (24, REPEAT("x", 4100));
+
+--remove_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+restart-group_commit_binlog_pos.test
+EOF
+
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+
+# Check that all transactions are recovered.
+SELECT a FROM t1 ORDER BY a;
+
+--source include/show_binary_logs.inc
+--let $binlog_file= master-bin.000004
+--let $binlog_start= 4
+--source include/show_binlog_events.inc
+
+# Cleanup
+connection default;
+DROP TABLE t1;
diff --git a/mysql-test/suite/csv/csv.result b/mysql-test/suite/csv/csv.result
index 6b20add1bed..d81cf6e28bc 100644
--- a/mysql-test/suite/csv/csv.result
+++ b/mysql-test/suite/csv/csv.result
@@ -5197,7 +5197,7 @@ drop table t1;
create table bug15205 (val int(11) not null) engine=csv;
create table bug15205_2 (val int(11) not null) engine=csv;
select * from bug15205;
-ERROR HY000: Can't get stat of './test/bug15205.CSV' (Errcode: 2)
+ERROR HY000: Can't get stat of './test/bug15205.CSV' (Errcode: 2 "No such file or directory")
select * from bug15205_2;
val
select * from bug15205;
@@ -5379,7 +5379,7 @@ ERROR 42000: The storage engine for the table doesn't support nullable columns
SHOW WARNINGS;
Level Code Message
Error 1178 The storage engine for the table doesn't support nullable columns
-Error 1005 Can't create table 'test.t1' (errno: 138)
+Error 1005 Can't create table 'test.t1' (errno: 138 "Unsupported extension used for table")
create table t1 (c1 tinyblob not null) engine=csv;
insert into t1 values("This");
update t1 set c1="That" where c1="This";
@@ -5392,7 +5392,7 @@ drop table t1;
create table t1 (a int not null) engine=csv;
lock tables t1 read;
select * from t1;
-ERROR HY000: File 'MYSQLD_DATADIR/test/t1.CSV' not found (Errcode: 2)
+ERROR HY000: File 'MYSQLD_DATADIR/test/t1.CSV' not found (Errcode: 2 "No such file or directory")
unlock tables;
drop table t1;
CREATE TABLE t1 (e enum('foo','bar') NOT NULL) ENGINE = CSV;
diff --git a/mysql-test/suite/engines/funcs/r/tc_rename_error.result b/mysql-test/suite/engines/funcs/r/tc_rename_error.result
index 1ac32ddf010..bd1c2abc057 100644
--- a/mysql-test/suite/engines/funcs/r/tc_rename_error.result
+++ b/mysql-test/suite/engines/funcs/r/tc_rename_error.result
@@ -15,7 +15,7 @@ ERROR 42S01: Table 't1' already exists
RENAME TABLE t3 TO t1;
ERROR 42S01: Table 't1' already exists
RENAME TABLE t3 TO doesnotexist.t1;
-ERROR HY000: Can't find file: './test/t3.frm' (errno: 2)
+ERROR HY000: Can't find file: './test/t3.frm' (errno: 2 "No such file or directory")
SHOW TABLES;
Tables_in_test
t1
diff --git a/mysql-test/suite/engines/iuds/r/insert_time.result b/mysql-test/suite/engines/iuds/r/insert_time.result
index 0f588274fc1..dceba37ae8e 100644
--- a/mysql-test/suite/engines/iuds/r/insert_time.result
+++ b/mysql-test/suite/engines/iuds/r/insert_time.result
@@ -5167,7 +5167,6 @@ c1 c2 c3
825:23:00 825:23:00 2009-01-05
10:00:00 10:00:00 2009-01-06
00:00:45 00:00:45 2009-01-07
-00:00:00 00:00:00 2009-01-09
838:59:59 838:59:59 2009-01-10
10:11:12 10:11:12 2009-01-11
11:11:12 11:11:12 2009-01-12
@@ -5178,18 +5177,12 @@ c1 c2 c3
11:11:27 11:11:27 2009-01-17
08:03:02 08:03:02 2009-01-18
00:11:12 00:11:12 2009-01-19
-00:00:11 00:00:11 2009-01-20
00:12:30 00:12:30 2009-01-23
09:00:45 09:00:45 2009-01-24
09:36:00 09:36:00 2009-01-25
-00:00:10 00:00:10 2009-01-26
-00:00:00 00:00:00 2009-01-27
-00:00:00 00:00:00 2009-01-28
-00:00:00 00:00:00 2009-01-29
262:22:00 262:22:00 2009-01-30
00:00:12 00:00:12 2009-01-31
08:29:45 NULL 2009-02-01
-00:00:00 07:23:55 NULL
TRUNCATE TABLE t5;
DROP TABLE t5;
DROP TABLE t1,t2,t3,t4;
diff --git a/mysql-test/suite/federated/federated_bug_35333.result b/mysql-test/suite/federated/federated_bug_35333.result
index e1319cfeeae..74f6f6e8f02 100644
--- a/mysql-test/suite/federated/federated_bug_35333.result
+++ b/mysql-test/suite/federated/federated_bug_35333.result
@@ -24,12 +24,12 @@ CREATE TABLE t1 (c1 int) ENGINE=MYISAM;
SELECT TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, ENGINE, ROW_FORMAT, TABLE_ROWS, DATA_LENGTH, TABLE_COMMENT
FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
TABLE_SCHEMA TABLE_NAME TABLE_TYPE ENGINE ROW_FORMAT TABLE_ROWS DATA_LENGTH TABLE_COMMENT
-test t1 BASE TABLE NULL NULL NULL NULL Can't find file: 't1' (errno: 2)
+test t1 BASE TABLE NULL NULL NULL NULL Can't find file: 't1' (errno: 2 "No such file or directory")
Warnings:
-Warning 1017 Can't find file: 't1' (errno: 2)
+Warning 1017 Can't find file: 't1' (errno: 2 "No such file or directory")
SHOW WARNINGS;
Level Code Message
-Warning 1017 Can't find file: 't1' (errno: 2)
+Warning 1017 Can't find file: 't1' (errno: 2 "No such file or directory")
DROP TABLE t1;
ERROR 42S02: Unknown table 't1'
#
diff --git a/mysql-test/suite/funcs_1/datadict/processlist_priv.inc b/mysql-test/suite/funcs_1/datadict/processlist_priv.inc
index b863b98d98a..474171d175d 100644
--- a/mysql-test/suite/funcs_1/datadict/processlist_priv.inc
+++ b/mysql-test/suite/funcs_1/datadict/processlist_priv.inc
@@ -66,7 +66,7 @@
let $table= processlist;
#
# columns of the information_schema table e.g. to use in a select.
-let $columns= ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS;
+let $columns= ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS;
#
# Where clause for an update.
let $update_where= WHERE id=1 ;
@@ -159,9 +159,9 @@ WHERE DB = 'information_schema' AND COMMAND = 'Sleep' AND USER = 'ddicttestuser1
eval SHOW CREATE TABLE $table;
--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
eval SHOW $table;
---replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
+--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS 13 MEMORY 14 ROWS
eval SELECT * FROM $table $select_where ORDER BY id;
---replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
+--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS 13 MEMORY 14 ROWS
eval SELECT $columns FROM $table $select_where ORDER BY id;
--source suite/funcs_1/datadict/datadict_priv.inc
--real_sleep 0.3
@@ -179,9 +179,9 @@ connection con100;
eval SHOW CREATE TABLE $table;
--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
eval SHOW $table;
---replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
+--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS 13 MEMORY 14 ROWS
eval SELECT * FROM $table $select_where ORDER BY id;
---replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
+--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS 13 MEMORY 14 ROWS
eval SELECT $columns FROM $table $select_where ORDER BY id;
--source suite/funcs_1/datadict/datadict_priv.inc
--real_sleep 0.3
@@ -205,7 +205,7 @@ connection con100;
SHOW GRANTS;
--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
SHOW processlist;
---replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
+--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS 13 MEMORY 14 ROWS
SELECT * FROM information_schema.processlist;
--real_sleep 0.3
@@ -217,7 +217,7 @@ connect (con101,localhost,ddicttestuser1,ddictpass,information_schema);
SHOW GRANTS;
--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
SHOW processlist;
---replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
+--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS 13 MEMORY 14 ROWS
SELECT * FROM information_schema.processlist;
--real_sleep 0.3
@@ -239,7 +239,7 @@ connect (anonymous1,localhost,"''",,information_schema);
SHOW GRANTS;
--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
SHOW processlist;
---replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
+--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS 13 MEMORY 14 ROWS
SELECT * FROM information_schema.processlist;
--real_sleep 0.3
@@ -261,7 +261,7 @@ connect (con102,localhost,ddicttestuser1,ddictpass,information_schema);
SHOW GRANTS;
--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
SHOW processlist;
---replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
+--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS 13 MEMORY 14 ROWS
SELECT * FROM information_schema.processlist;
--real_sleep 0.3
@@ -287,7 +287,7 @@ if ($fixed_bug_30395)
--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
SHOW processlist;
}
---replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
+--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS 13 MEMORY 14 ROWS
SELECT * FROM information_schema.processlist;
--real_sleep 0.3
@@ -308,7 +308,7 @@ connect (con103,localhost,ddicttestuser1,ddictpass,information_schema);
SHOW GRANTS FOR 'ddicttestuser1'@'localhost';
--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
SHOW processlist;
---replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
+--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS 13 MEMORY 14 ROWS
SELECT * FROM information_schema.processlist;
--real_sleep 0.3
@@ -330,7 +330,7 @@ connect (con104,localhost,ddicttestuser1,ddictpass,information_schema);
SHOW GRANTS FOR 'ddicttestuser1'@'localhost';
--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
SHOW processlist;
---replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
+--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS 13 MEMORY 14 ROWS
SELECT * FROM information_schema.processlist;
--real_sleep 0.3
@@ -377,7 +377,7 @@ connect (con200,localhost,ddicttestuser2,ddictpass,information_schema);
SHOW GRANTS FOR 'ddicttestuser2'@'localhost';
--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
SHOW processlist;
---replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
+--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS 13 MEMORY 14 ROWS
SELECT * FROM information_schema.processlist;
--real_sleep 0.3
@@ -398,7 +398,7 @@ connect (con201,localhost,ddicttestuser2,ddictpass,information_schema);
SHOW GRANTS;
--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
SHOW processlist;
---replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
+--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS 13 MEMORY 14 ROWS
SELECT * FROM information_schema.processlist;
--real_sleep 0.3
@@ -421,7 +421,7 @@ SHOW GRANTS FOR 'ddicttestuser1'@'localhost';
GRANT PROCESS ON *.* TO 'ddicttestuser2'@'localhost';
--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
SHOW processlist;
---replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
+--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS 13 MEMORY 14 ROWS
SELECT * FROM information_schema.processlist;
--real_sleep 0.3
@@ -445,7 +445,7 @@ connect (con108,localhost,ddicttestuser1,ddictpass,information_schema);
SHOW GRANTS FOR 'ddicttestuser1'@'localhost';
--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
SHOW processlist;
---replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS
+--replace_column 1 ID 3 HOST_NAME 6 TIME 9 TIME_MS 13 MEMORY 14 ROWS
SELECT * FROM information_schema.processlist;
--real_sleep 0.3
diff --git a/mysql-test/suite/funcs_1/datadict/processlist_val.inc b/mysql-test/suite/funcs_1/datadict/processlist_val.inc
index bb6c13a6f4b..d0d2e606152 100644
--- a/mysql-test/suite/funcs_1/datadict/processlist_val.inc
+++ b/mysql-test/suite/funcs_1/datadict/processlist_val.inc
@@ -93,9 +93,9 @@ echo
# - INFO must contain the corresponding SHOW/SELECT PROCESSLIST
#
# 1. Just dump what we get
---replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME> 9 <TIME_MS>
+--replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME> 9 <TIME_MS> 13 <MEMORY> 14 <ROWS>
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
---replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME> 9 <TIME_MS>
+--replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME> 9 <TIME_MS> 13 <MEMORY> 14 <ROWS>
SHOW FULL PROCESSLIST;
#
# Determine the connection id of the current connection (default)
@@ -166,7 +166,7 @@ let $wait_condition= SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST
WHERE COMMAND = 'Sleep' AND USER = 'test_user';
--source include/wait_condition.inc
# 1. Just dump what we get
---replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME> 9 <TIME_MS>
+--replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME> 9 <TIME_MS> 13 <MEMORY> 14 <ROWS>
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
--replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME>
SHOW FULL PROCESSLIST;
@@ -211,7 +211,7 @@ echo
# ----- switch to connection con1 (user = test_user) -----
;
connection con1;
---replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME> 9 <TIME_MS>
+--replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME> 9 <TIME_MS> 13 <MEMORY> 14 <ROWS>
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
--replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME>
SHOW FULL PROCESSLIST;
@@ -245,7 +245,7 @@ echo
;
connection con2;
# Just dump what we get
---replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME> 9 <TIME_MS>
+--replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME> 9 <TIME_MS> 13 <MEMORY> 14 <ROWS>
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
--replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME>
SHOW FULL PROCESSLIST;
@@ -305,7 +305,7 @@ WHERE ID = @test_user_con2_id AND Command IN('Query','Execute')
AND State = 'User sleep' AND INFO IS NOT NULL ;
--source include/wait_condition.inc
# 1. Just dump what we get
---replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME> 9 <TIME_MS>
+--replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME> 9 <TIME_MS> 13 <MEMORY> 14 <ROWS>
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
--replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME>
SHOW FULL PROCESSLIST;
@@ -376,7 +376,7 @@ let $wait_condition= SELECT COUNT(*) FROM INFORMATION_SCHEMA.PROCESSLIST
#
# Expect to see the state 'Waiting for table metadata lock' for the third
# connection because the SELECT collides with the WRITE TABLE LOCK.
---replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME> 9 <TIME_MS>
+--replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME> 9 <TIME_MS> 13 <MEMORY> 14 <ROWS>
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
UNLOCK TABLES;
#
@@ -435,7 +435,7 @@ echo
# SHOW FULL PROCESSLIST Complete statement
# SHOW PROCESSLIST statement truncated after 100 char
;
---replace_column 1 <ID> 3 <HOST_NAME> 5 <COMMAND> 6 <TIME> 7 <STATE> 9 <TIME_MS>
+--replace_column 1 <ID> 3 <HOST_NAME> 5 <COMMAND> 6 <TIME> 7 <STATE> 9 <TIME_MS> 13 <MEMORY> 14 <ROWS>
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
--replace_column 1 <ID> 3 <HOST_NAME> 5 <COMMAND> 6 <TIME> 7 <STATE>
SHOW FULL PROCESSLIST;
diff --git a/mysql-test/suite/funcs_1/r/innodb_func_view.result b/mysql-test/suite/funcs_1/r/innodb_func_view.result
index dd376154100..5504ec7efc2 100644
--- a/mysql-test/suite/funcs_1/r/innodb_func_view.result
+++ b/mysql-test/suite/funcs_1/r/innodb_func_view.result
@@ -3801,7 +3801,7 @@ NULL NULL 1
00:00:00 2
00:00:00 <--------30 characters-------> 3
-00:00:00 ---äÖüß@µ*$-- 4
-NULL -1 5
+-00:00:01 -1 5
41:58:00 1 17:58 22
Warnings:
Warning 1292 Truncated incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
@@ -3820,7 +3820,7 @@ NULL NULL 1
00:00:00
00:00:00 <--------30 characters-------> 3
-00:00:00 ---äÖüß@µ*$--
-NULL -1
+-00:00:01 -1
41:58:00 1 17:58
Warnings:
Warning 1292 Truncated incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
diff --git a/mysql-test/suite/funcs_1/r/is_columns_is.result b/mysql-test/suite/funcs_1/r/is_columns_is.result
index 4da7eeb2ada..e161e8ed058 100644
--- a/mysql-test/suite/funcs_1/r/is_columns_is.result
+++ b/mysql-test/suite/funcs_1/r/is_columns_is.result
@@ -221,10 +221,12 @@ def information_schema PLUGINS PLUGIN_TYPE_VERSION 5 NO varchar 20 60 NULL NULL
def information_schema PLUGINS PLUGIN_VERSION 2 NO varchar 20 60 NULL NULL NULL utf8 utf8_general_ci varchar(20) select
def information_schema PROCESSLIST COMMAND 5 NO varchar 16 48 NULL NULL NULL utf8 utf8_general_ci varchar(16) select
def information_schema PROCESSLIST DB 4 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select
+def information_schema PROCESSLIST EXAMINED_ROWS 14 0 NO int NULL NULL 10 0 NULL NULL NULL int(7) select
def information_schema PROCESSLIST HOST 3 NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select
def information_schema PROCESSLIST ID 1 0 NO bigint NULL NULL 19 0 NULL NULL NULL bigint(4) select
def information_schema PROCESSLIST INFO 8 NULL YES longtext 4294967295 4294967295 NULL NULL NULL utf8 utf8_general_ci longtext select
def information_schema PROCESSLIST MAX_STAGE 11 0 NO tinyint NULL NULL 3 0 NULL NULL NULL tinyint(2) select
+def information_schema PROCESSLIST MEMORY_USED 13 0 NO int NULL NULL 10 0 NULL NULL NULL int(7) select
def information_schema PROCESSLIST PROGRESS 12 0.000 NO decimal NULL NULL 7 3 NULL NULL NULL decimal(7,3) select
def information_schema PROCESSLIST STAGE 10 0 NO tinyint NULL NULL 3 0 NULL NULL NULL tinyint(2) select
def information_schema PROCESSLIST STATE 7 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select
@@ -699,6 +701,8 @@ NULL information_schema PROCESSLIST TIME_MS decimal NULL NULL NULL NULL decimal(
NULL information_schema PROCESSLIST STAGE tinyint NULL NULL NULL NULL tinyint(2)
NULL information_schema PROCESSLIST MAX_STAGE tinyint NULL NULL NULL NULL tinyint(2)
NULL information_schema PROCESSLIST PROGRESS decimal NULL NULL NULL NULL decimal(7,3)
+NULL information_schema PROCESSLIST MEMORY_USED int NULL NULL NULL NULL int(7)
+NULL information_schema PROCESSLIST EXAMINED_ROWS int NULL NULL NULL NULL int(7)
3.0000 information_schema REFERENTIAL_CONSTRAINTS CONSTRAINT_CATALOG varchar 512 1536 utf8 utf8_general_ci varchar(512)
3.0000 information_schema REFERENTIAL_CONSTRAINTS CONSTRAINT_SCHEMA varchar 64 192 utf8 utf8_general_ci varchar(64)
3.0000 information_schema REFERENTIAL_CONSTRAINTS CONSTRAINT_NAME varchar 64 192 utf8 utf8_general_ci varchar(64)
diff --git a/mysql-test/suite/funcs_1/r/is_columns_is_embedded.result b/mysql-test/suite/funcs_1/r/is_columns_is_embedded.result
index 3fb4d97748c..003f506fee8 100644
--- a/mysql-test/suite/funcs_1/r/is_columns_is_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_columns_is_embedded.result
@@ -222,10 +222,12 @@ def information_schema PLUGINS PLUGIN_TYPE_VERSION 5 NO varchar 20 60 NULL NULL
def information_schema PLUGINS PLUGIN_VERSION 2 NO varchar 20 60 NULL NULL NULL utf8 utf8_general_ci varchar(20)
def information_schema PROCESSLIST COMMAND 5 NO varchar 16 48 NULL NULL NULL utf8 utf8_general_ci varchar(16)
def information_schema PROCESSLIST DB 4 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64)
+def information_schema PROCESSLIST EXAMINED_ROWS 14 0 NO int NULL NULL 10 0 NULL NULL NULL int(7)
def information_schema PROCESSLIST HOST 3 NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64)
def information_schema PROCESSLIST ID 1 0 NO bigint NULL NULL 19 0 NULL NULL NULL bigint(4)
def information_schema PROCESSLIST INFO 8 NULL YES longtext 4294967295 4294967295 NULL NULL NULL utf8 utf8_general_ci longtext
def information_schema PROCESSLIST MAX_STAGE 11 0 NO tinyint NULL NULL 3 0 NULL NULL NULL tinyint(2)
+def information_schema PROCESSLIST MEMORY_USED 13 0 NO int NULL NULL 10 0 NULL NULL NULL int(7)
def information_schema PROCESSLIST PROGRESS 12 0.000 NO decimal NULL NULL 7 3 NULL NULL NULL decimal(7,3)
def information_schema PROCESSLIST STAGE 10 0 NO tinyint NULL NULL 3 0 NULL NULL NULL tinyint(2)
def information_schema PROCESSLIST STATE 7 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64)
@@ -704,6 +706,8 @@ NULL information_schema PROCESSLIST TIME_MS decimal NULL NULL NULL NULL decimal(
NULL information_schema PROCESSLIST STAGE tinyint NULL NULL NULL NULL tinyint(2)
NULL information_schema PROCESSLIST MAX_STAGE tinyint NULL NULL NULL NULL tinyint(2)
NULL information_schema PROCESSLIST PROGRESS decimal NULL NULL NULL NULL decimal(7,3)
+NULL information_schema PROCESSLIST MEMORY_USED int NULL NULL NULL NULL int(7)
+NULL information_schema PROCESSLIST EXAMINED_ROWS int NULL NULL NULL NULL int(7)
3.0000 information_schema REFERENTIAL_CONSTRAINTS CONSTRAINT_CATALOG varchar 512 1536 utf8 utf8_general_ci varchar(512)
3.0000 information_schema REFERENTIAL_CONSTRAINTS CONSTRAINT_SCHEMA varchar 64 192 utf8 utf8_general_ci varchar(64)
3.0000 information_schema REFERENTIAL_CONSTRAINTS CONSTRAINT_NAME varchar 64 192 utf8 utf8_general_ci varchar(64)
diff --git a/mysql-test/suite/funcs_1/r/is_columns_mysql.result b/mysql-test/suite/funcs_1/r/is_columns_mysql.result
index 4eff12dab7b..df3ca6b366d 100644
--- a/mysql-test/suite/funcs_1/r/is_columns_mysql.result
+++ b/mysql-test/suite/funcs_1/r/is_columns_mysql.result
@@ -9,6 +9,14 @@ def mysql columns_priv Host 1 NO char 60 180 NULL NULL NULL utf8 utf8_bin char(
def mysql columns_priv Table_name 4 NO char 64 192 NULL NULL NULL utf8 utf8_bin char(64) PRI select,insert,update,references
def mysql columns_priv Timestamp 6 CURRENT_TIMESTAMP NO timestamp NULL NULL NULL NULL 0 NULL NULL timestamp on update CURRENT_TIMESTAMP select,insert,update,references
def mysql columns_priv User 3 NO char 16 48 NULL NULL NULL utf8 utf8_bin char(16) PRI select,insert,update,references
+def mysql column_stats avg_frequency 8 NULL YES decimal NULL NULL 12 4 NULL NULL NULL decimal(12,4) select,insert,update,references
+def mysql column_stats avg_length 7 NULL YES decimal NULL NULL 12 4 NULL NULL NULL decimal(12,4) select,insert,update,references
+def mysql column_stats column_name 3 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI select,insert,update,references
+def mysql column_stats db_name 1 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI select,insert,update,references
+def mysql column_stats max_value 5 NULL YES varchar 255 765 NULL NULL NULL utf8 utf8_bin varchar(255) select,insert,update,references
+def mysql column_stats min_value 4 NULL YES varchar 255 765 NULL NULL NULL utf8 utf8_bin varchar(255) select,insert,update,references
+def mysql column_stats nulls_ratio 6 NULL YES decimal NULL NULL 12 4 NULL NULL NULL decimal(12,4) select,insert,update,references
+def mysql column_stats table_name 2 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI select,insert,update,references
def mysql db Alter_priv 13 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references
def mysql db Alter_routine_priv 19 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references
def mysql db Create_priv 8 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references
@@ -59,7 +67,7 @@ def mysql func ret 2 0 NO tinyint NULL NULL 3 0 NULL NULL NULL tinyint(1) sele
def mysql func type 4 NULL NO enum 9 27 NULL NULL NULL utf8 utf8_general_ci enum('function','aggregate') select,insert,update,references
def mysql general_log argument 6 NULL NO mediumtext 16777215 16777215 NULL NULL NULL utf8 utf8_general_ci mediumtext select,insert,update,references
def mysql general_log command_type 5 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references
-def mysql general_log event_time 1 CURRENT_TIMESTAMP NO timestamp NULL NULL NULL NULL 6 NULL NULL timestamp(6) on update CURRENT_TIMESTAMP select,insert,update,references
+def mysql general_log event_time 1 CURRENT_TIMESTAMP(6) NO timestamp NULL NULL NULL NULL 6 NULL NULL timestamp(6) on update CURRENT_TIMESTAMP select,insert,update,references
def mysql general_log server_id 4 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references
def mysql general_log thread_id 3 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
def mysql general_log user_host 2 NULL NO mediumtext 16777215 16777215 NULL NULL NULL utf8 utf8_general_ci mediumtext select,insert,update,references
@@ -97,6 +105,11 @@ def mysql host Select_priv 3 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci e
def mysql host Show_view_priv 16 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references
def mysql host Trigger_priv 20 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references
def mysql host Update_priv 5 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references
+def mysql index_stats avg_frequency 5 NULL YES decimal NULL NULL 12 4 NULL NULL NULL decimal(12,4) select,insert,update,references
+def mysql index_stats db_name 1 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI select,insert,update,references
+def mysql index_stats index_name 3 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI select,insert,update,references
+def mysql index_stats prefix_arity 4 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) unsigned PRI select,insert,update,references
+def mysql index_stats table_name 2 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI select,insert,update,references
def mysql ndb_binlog_index deletes 6 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def mysql ndb_binlog_index epoch 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned PRI select,insert,update,references
def mysql ndb_binlog_index File 2 NULL NO varchar 255 255 NULL NULL NULL latin1 latin1_swedish_ci varchar(255) select,insert,update,references
@@ -159,7 +172,7 @@ def mysql slow_log rows_examined 6 NULL NO int NULL NULL 10 0 NULL NULL NULL int
def mysql slow_log rows_sent 5 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
def mysql slow_log server_id 10 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references
def mysql slow_log sql_text 11 NULL NO mediumtext 16777215 16777215 NULL NULL NULL utf8 utf8_general_ci mediumtext select,insert,update,references
-def mysql slow_log start_time 1 CURRENT_TIMESTAMP NO timestamp NULL NULL NULL NULL 6 NULL NULL timestamp(6) on update CURRENT_TIMESTAMP select,insert,update,references
+def mysql slow_log start_time 1 CURRENT_TIMESTAMP(6) NO timestamp NULL NULL NULL NULL 6 NULL NULL timestamp(6) on update CURRENT_TIMESTAMP select,insert,update,references
def mysql slow_log user_host 2 NULL NO mediumtext 16777215 16777215 NULL NULL NULL utf8 utf8_general_ci mediumtext select,insert,update,references
def mysql tables_priv Column_priv 8 NO set 31 93 NULL NULL NULL utf8 utf8_general_ci set('Select','Insert','Update','References') select,insert,update,references
def mysql tables_priv Db 2 NO char 64 192 NULL NULL NULL utf8 utf8_bin char(64) PRI select,insert,update,references
@@ -169,6 +182,9 @@ def mysql tables_priv Table_name 4 NO char 64 192 NULL NULL NULL utf8 utf8_bin
def mysql tables_priv Table_priv 7 NO set 98 294 NULL NULL NULL utf8 utf8_general_ci set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger') select,insert,update,references
def mysql tables_priv Timestamp 6 CURRENT_TIMESTAMP NO timestamp NULL NULL NULL NULL 0 NULL NULL timestamp on update CURRENT_TIMESTAMP select,insert,update,references
def mysql tables_priv User 3 NO char 16 48 NULL NULL NULL utf8 utf8_bin char(16) PRI select,insert,update,references
+def mysql table_stats cardinality 3 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(21) unsigned select,insert,update,references
+def mysql table_stats db_name 1 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI select,insert,update,references
+def mysql table_stats table_name 2 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI select,insert,update,references
def mysql time_zone Time_zone_id 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned PRI auto_increment select,insert,update,references
def mysql time_zone Use_leap_seconds 2 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('Y','N') select,insert,update,references
def mysql time_zone_leap_second Correction 2 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
@@ -258,6 +274,7 @@ ORDER BY CHARACTER_SET_NAME, COLLATION_NAME, COL_CML;
COL_CML DATA_TYPE CHARACTER_SET_NAME COLLATION_NAME
3.0000 char utf8 utf8_bin
3.0000 enum utf8 utf8_bin
+3.0000 varchar utf8 utf8_bin
3.0000 char utf8 utf8_general_ci
3.0000 enum utf8 utf8_general_ci
3.0000 set utf8 utf8_general_ci
@@ -274,6 +291,7 @@ ORDER BY CHARACTER_SET_NAME, COLLATION_NAME, COL_CML;
COL_CML DATA_TYPE CHARACTER_SET_NAME COLLATION_NAME
NULL bigint NULL NULL
NULL datetime NULL NULL
+NULL decimal NULL NULL
NULL int NULL NULL
NULL smallint NULL NULL
NULL time NULL NULL
@@ -302,6 +320,14 @@ COL_CML TABLE_SCHEMA TABLE_NAME COLUMN_NAME DATA_TYPE CHARACTER_MAXIMUM_LENGTH C
3.0000 mysql columns_priv Column_name char 64 192 utf8 utf8_bin char(64)
NULL mysql columns_priv Timestamp timestamp NULL NULL NULL NULL timestamp
3.0000 mysql columns_priv Column_priv set 31 93 utf8 utf8_general_ci set('Select','Insert','Update','References')
+3.0000 mysql column_stats db_name varchar 64 192 utf8 utf8_bin varchar(64)
+3.0000 mysql column_stats table_name varchar 64 192 utf8 utf8_bin varchar(64)
+3.0000 mysql column_stats column_name varchar 64 192 utf8 utf8_bin varchar(64)
+3.0000 mysql column_stats min_value varchar 255 765 utf8 utf8_bin varchar(255)
+3.0000 mysql column_stats max_value varchar 255 765 utf8 utf8_bin varchar(255)
+NULL mysql column_stats nulls_ratio decimal NULL NULL NULL NULL decimal(12,4)
+NULL mysql column_stats avg_length decimal NULL NULL NULL NULL decimal(12,4)
+NULL mysql column_stats avg_frequency decimal NULL NULL NULL NULL decimal(12,4)
3.0000 mysql db Host char 60 180 utf8 utf8_bin char(60)
3.0000 mysql db Db char 64 192 utf8 utf8_bin char(64)
3.0000 mysql db User char 16 48 utf8 utf8_bin char(16)
@@ -390,6 +416,11 @@ NULL mysql help_topic help_category_id smallint NULL NULL NULL NULL smallint(5)
3.0000 mysql host Alter_routine_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
3.0000 mysql host Execute_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
3.0000 mysql host Trigger_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
+3.0000 mysql index_stats db_name varchar 64 192 utf8 utf8_bin varchar(64)
+3.0000 mysql index_stats table_name varchar 64 192 utf8 utf8_bin varchar(64)
+3.0000 mysql index_stats index_name varchar 64 192 utf8 utf8_bin varchar(64)
+NULL mysql index_stats prefix_arity int NULL NULL NULL NULL int(11) unsigned
+NULL mysql index_stats avg_frequency decimal NULL NULL NULL NULL decimal(12,4)
NULL mysql ndb_binlog_index Position bigint NULL NULL NULL NULL bigint(20) unsigned
1.0000 mysql ndb_binlog_index File varchar 255 255 latin1 latin1_swedish_ci varchar(255)
NULL mysql ndb_binlog_index epoch bigint NULL NULL NULL NULL bigint(20) unsigned
@@ -462,6 +493,9 @@ NULL mysql slow_log server_id int NULL NULL NULL NULL int(10) unsigned
NULL mysql tables_priv Timestamp timestamp NULL NULL NULL NULL timestamp
3.0000 mysql tables_priv Table_priv set 98 294 utf8 utf8_general_ci set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger')
3.0000 mysql tables_priv Column_priv set 31 93 utf8 utf8_general_ci set('Select','Insert','Update','References')
+3.0000 mysql table_stats db_name varchar 64 192 utf8 utf8_bin varchar(64)
+3.0000 mysql table_stats table_name varchar 64 192 utf8 utf8_bin varchar(64)
+NULL mysql table_stats cardinality bigint NULL NULL NULL NULL bigint(21) unsigned
NULL mysql time_zone Time_zone_id int NULL NULL NULL NULL int(10) unsigned
3.0000 mysql time_zone Use_leap_seconds enum 1 3 utf8 utf8_general_ci enum('Y','N')
NULL mysql time_zone_leap_second Transition_time bigint NULL NULL NULL NULL bigint(20)
diff --git a/mysql-test/suite/funcs_1/r/is_columns_mysql_embedded.result b/mysql-test/suite/funcs_1/r/is_columns_mysql_embedded.result
index d99e7f06295..fc9e68a7850 100644
--- a/mysql-test/suite/funcs_1/r/is_columns_mysql_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_columns_mysql_embedded.result
@@ -9,6 +9,14 @@ def mysql columns_priv Host 1 NO char 60 180 NULL NULL NULL utf8 utf8_bin char(
def mysql columns_priv Table_name 4 NO char 64 192 NULL NULL NULL utf8 utf8_bin char(64) PRI
def mysql columns_priv Timestamp 6 CURRENT_TIMESTAMP NO timestamp NULL NULL NULL NULL 0 NULL NULL timestamp on update CURRENT_TIMESTAMP
def mysql columns_priv User 3 NO char 16 48 NULL NULL NULL utf8 utf8_bin char(16) PRI
+def mysql column_stats avg_frequency 8 NULL YES decimal NULL NULL 12 4 NULL NULL NULL decimal(12,4)
+def mysql column_stats avg_length 7 NULL YES decimal NULL NULL 12 4 NULL NULL NULL decimal(12,4)
+def mysql column_stats column_name 3 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI
+def mysql column_stats db_name 1 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI
+def mysql column_stats max_value 5 NULL YES varchar 255 765 NULL NULL NULL utf8 utf8_bin varchar(255)
+def mysql column_stats min_value 4 NULL YES varchar 255 765 NULL NULL NULL utf8 utf8_bin varchar(255)
+def mysql column_stats nulls_ratio 6 NULL YES decimal NULL NULL 12 4 NULL NULL NULL decimal(12,4)
+def mysql column_stats table_name 2 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI
def mysql db Alter_priv 13 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y')
def mysql db Alter_routine_priv 19 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y')
def mysql db Create_priv 8 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y')
@@ -59,7 +67,7 @@ def mysql func ret 2 0 NO tinyint NULL NULL 3 0 NULL NULL NULL tinyint(1)
def mysql func type 4 NULL NO enum 9 27 NULL NULL NULL utf8 utf8_general_ci enum('function','aggregate')
def mysql general_log argument 6 NULL NO mediumtext 16777215 16777215 NULL NULL NULL utf8 utf8_general_ci mediumtext
def mysql general_log command_type 5 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64)
-def mysql general_log event_time 1 CURRENT_TIMESTAMP NO timestamp NULL NULL NULL NULL 6 NULL NULL timestamp(6) on update CURRENT_TIMESTAMP
+def mysql general_log event_time 1 CURRENT_TIMESTAMP(6) NO timestamp NULL NULL NULL NULL 6 NULL NULL timestamp(6) on update CURRENT_TIMESTAMP
def mysql general_log server_id 4 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned
def mysql general_log thread_id 3 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11)
def mysql general_log user_host 2 NULL NO mediumtext 16777215 16777215 NULL NULL NULL utf8 utf8_general_ci mediumtext
@@ -97,6 +105,11 @@ def mysql host Select_priv 3 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci e
def mysql host Show_view_priv 16 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y')
def mysql host Trigger_priv 20 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y')
def mysql host Update_priv 5 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y')
+def mysql index_stats avg_frequency 5 NULL YES decimal NULL NULL 12 4 NULL NULL NULL decimal(12,4)
+def mysql index_stats db_name 1 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI
+def mysql index_stats index_name 3 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI
+def mysql index_stats prefix_arity 4 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) unsigned PRI
+def mysql index_stats table_name 2 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI
def mysql ndb_binlog_index deletes 6 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned
def mysql ndb_binlog_index epoch 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned PRI
def mysql ndb_binlog_index File 2 NULL NO varchar 255 255 NULL NULL NULL latin1 latin1_swedish_ci varchar(255)
@@ -159,7 +172,7 @@ def mysql slow_log rows_examined 6 NULL NO int NULL NULL 10 0 NULL NULL NULL int
def mysql slow_log rows_sent 5 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11)
def mysql slow_log server_id 10 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned
def mysql slow_log sql_text 11 NULL NO mediumtext 16777215 16777215 NULL NULL NULL utf8 utf8_general_ci mediumtext
-def mysql slow_log start_time 1 CURRENT_TIMESTAMP NO timestamp NULL NULL NULL NULL 6 NULL NULL timestamp(6) on update CURRENT_TIMESTAMP
+def mysql slow_log start_time 1 CURRENT_TIMESTAMP(6) NO timestamp NULL NULL NULL NULL 6 NULL NULL timestamp(6) on update CURRENT_TIMESTAMP
def mysql slow_log user_host 2 NULL NO mediumtext 16777215 16777215 NULL NULL NULL utf8 utf8_general_ci mediumtext
def mysql tables_priv Column_priv 8 NO set 31 93 NULL NULL NULL utf8 utf8_general_ci set('Select','Insert','Update','References')
def mysql tables_priv Db 2 NO char 64 192 NULL NULL NULL utf8 utf8_bin char(64) PRI
@@ -169,6 +182,9 @@ def mysql tables_priv Table_name 4 NO char 64 192 NULL NULL NULL utf8 utf8_bin
def mysql tables_priv Table_priv 7 NO set 98 294 NULL NULL NULL utf8 utf8_general_ci set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger')
def mysql tables_priv Timestamp 6 CURRENT_TIMESTAMP NO timestamp NULL NULL NULL NULL 0 NULL NULL timestamp on update CURRENT_TIMESTAMP
def mysql tables_priv User 3 NO char 16 48 NULL NULL NULL utf8 utf8_bin char(16) PRI
+def mysql table_stats cardinality 3 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(21) unsigned
+def mysql table_stats db_name 1 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI
+def mysql table_stats table_name 2 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI
def mysql time_zone Time_zone_id 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned PRI auto_increment
def mysql time_zone Use_leap_seconds 2 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('Y','N')
def mysql time_zone_leap_second Correction 2 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11)
@@ -258,6 +274,7 @@ ORDER BY CHARACTER_SET_NAME, COLLATION_NAME, COL_CML;
COL_CML DATA_TYPE CHARACTER_SET_NAME COLLATION_NAME
3.0000 char utf8 utf8_bin
3.0000 enum utf8 utf8_bin
+3.0000 varchar utf8 utf8_bin
3.0000 char utf8 utf8_general_ci
3.0000 enum utf8 utf8_general_ci
3.0000 set utf8 utf8_general_ci
@@ -274,6 +291,7 @@ ORDER BY CHARACTER_SET_NAME, COLLATION_NAME, COL_CML;
COL_CML DATA_TYPE CHARACTER_SET_NAME COLLATION_NAME
NULL bigint NULL NULL
NULL datetime NULL NULL
+NULL decimal NULL NULL
NULL int NULL NULL
NULL smallint NULL NULL
NULL time NULL NULL
@@ -302,6 +320,14 @@ COL_CML TABLE_SCHEMA TABLE_NAME COLUMN_NAME DATA_TYPE CHARACTER_MAXIMUM_LENGTH C
3.0000 mysql columns_priv Column_name char 64 192 utf8 utf8_bin char(64)
NULL mysql columns_priv Timestamp timestamp NULL NULL NULL NULL timestamp
3.0000 mysql columns_priv Column_priv set 31 93 utf8 utf8_general_ci set('Select','Insert','Update','References')
+3.0000 mysql column_stats db_name varchar 64 192 utf8 utf8_bin varchar(64)
+3.0000 mysql column_stats table_name varchar 64 192 utf8 utf8_bin varchar(64)
+3.0000 mysql column_stats column_name varchar 64 192 utf8 utf8_bin varchar(64)
+3.0000 mysql column_stats min_value varchar 255 765 utf8 utf8_bin varchar(255)
+3.0000 mysql column_stats max_value varchar 255 765 utf8 utf8_bin varchar(255)
+NULL mysql column_stats nulls_ratio decimal NULL NULL NULL NULL decimal(12,4)
+NULL mysql column_stats avg_length decimal NULL NULL NULL NULL decimal(12,4)
+NULL mysql column_stats avg_frequency decimal NULL NULL NULL NULL decimal(12,4)
3.0000 mysql db Host char 60 180 utf8 utf8_bin char(60)
3.0000 mysql db Db char 64 192 utf8 utf8_bin char(64)
3.0000 mysql db User char 16 48 utf8 utf8_bin char(16)
@@ -390,6 +416,11 @@ NULL mysql help_topic help_category_id smallint NULL NULL NULL NULL smallint(5)
3.0000 mysql host Alter_routine_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
3.0000 mysql host Execute_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
3.0000 mysql host Trigger_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
+3.0000 mysql index_stats db_name varchar 64 192 utf8 utf8_bin varchar(64)
+3.0000 mysql index_stats table_name varchar 64 192 utf8 utf8_bin varchar(64)
+3.0000 mysql index_stats index_name varchar 64 192 utf8 utf8_bin varchar(64)
+NULL mysql index_stats prefix_arity int NULL NULL NULL NULL int(11) unsigned
+NULL mysql index_stats avg_frequency decimal NULL NULL NULL NULL decimal(12,4)
NULL mysql ndb_binlog_index Position bigint NULL NULL NULL NULL bigint(20) unsigned
1.0000 mysql ndb_binlog_index File varchar 255 255 latin1 latin1_swedish_ci varchar(255)
NULL mysql ndb_binlog_index epoch bigint NULL NULL NULL NULL bigint(20) unsigned
@@ -462,6 +493,9 @@ NULL mysql slow_log server_id int NULL NULL NULL NULL int(10) unsigned
NULL mysql tables_priv Timestamp timestamp NULL NULL NULL NULL timestamp
3.0000 mysql tables_priv Table_priv set 98 294 utf8 utf8_general_ci set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger')
3.0000 mysql tables_priv Column_priv set 31 93 utf8 utf8_general_ci set('Select','Insert','Update','References')
+3.0000 mysql table_stats db_name varchar 64 192 utf8 utf8_bin varchar(64)
+3.0000 mysql table_stats table_name varchar 64 192 utf8 utf8_bin varchar(64)
+NULL mysql table_stats cardinality bigint NULL NULL NULL NULL bigint(21) unsigned
NULL mysql time_zone Time_zone_id int NULL NULL NULL NULL int(10) unsigned
3.0000 mysql time_zone Use_leap_seconds enum 1 3 utf8 utf8_general_ci enum('Y','N')
NULL mysql time_zone_leap_second Transition_time bigint NULL NULL NULL NULL bigint(20)
diff --git a/mysql-test/suite/funcs_1/r/is_engines_merge.result b/mysql-test/suite/funcs_1/r/is_engines_merge.result
index 3bc7a498581..b72c98bfd7e 100644
--- a/mysql-test/suite/funcs_1/r/is_engines_merge.result
+++ b/mysql-test/suite/funcs_1/r/is_engines_merge.result
@@ -1,6 +1,6 @@
SELECT * FROM information_schema.engines
WHERE ENGINE = 'MRG_MYISAM';
-ENGINE MRG_MYISAM
+ENGINE MRG_MyISAM
SUPPORT YES
COMMENT Collection of identical MyISAM tables
TRANSACTIONS NO
diff --git a/mysql-test/suite/funcs_1/r/is_key_column_usage.result b/mysql-test/suite/funcs_1/r/is_key_column_usage.result
index afd1fe15fed..5751276e943 100644
--- a/mysql-test/suite/funcs_1/r/is_key_column_usage.result
+++ b/mysql-test/suite/funcs_1/r/is_key_column_usage.result
@@ -75,6 +75,9 @@ table_schema, table_name, column_name
FROM information_schema.key_column_usage
WHERE constraint_catalog IS NOT NULL OR table_catalog IS NOT NULL;
constraint_catalog constraint_schema constraint_name table_catalog table_schema table_name column_name
+def mysql PRIMARY def mysql column_stats db_name
+def mysql PRIMARY def mysql column_stats table_name
+def mysql PRIMARY def mysql column_stats column_name
def mysql PRIMARY def mysql columns_priv Host
def mysql PRIMARY def mysql columns_priv Db
def mysql PRIMARY def mysql columns_priv User
@@ -96,6 +99,10 @@ def mysql PRIMARY def mysql help_topic help_topic_id
def mysql name def mysql help_topic name
def mysql PRIMARY def mysql host Host
def mysql PRIMARY def mysql host Db
+def mysql PRIMARY def mysql index_stats db_name
+def mysql PRIMARY def mysql index_stats table_name
+def mysql PRIMARY def mysql index_stats index_name
+def mysql PRIMARY def mysql index_stats prefix_arity
def mysql PRIMARY def mysql ndb_binlog_index epoch
def mysql PRIMARY def mysql plugin name
def mysql PRIMARY def mysql proc db
@@ -111,6 +118,8 @@ def mysql PRIMARY def mysql proxies_priv User
def mysql PRIMARY def mysql proxies_priv Proxied_host
def mysql PRIMARY def mysql proxies_priv Proxied_user
def mysql PRIMARY def mysql servers Server_name
+def mysql PRIMARY def mysql table_stats db_name
+def mysql PRIMARY def mysql table_stats table_name
def mysql PRIMARY def mysql tables_priv Host
def mysql PRIMARY def mysql tables_priv Db
def mysql PRIMARY def mysql tables_priv User
diff --git a/mysql-test/suite/funcs_1/r/is_statistics.result b/mysql-test/suite/funcs_1/r/is_statistics.result
index 8543b207728..4796839b0fd 100644
--- a/mysql-test/suite/funcs_1/r/is_statistics.result
+++ b/mysql-test/suite/funcs_1/r/is_statistics.result
@@ -85,6 +85,9 @@ INDEX_COMMENT varchar(1024) NO
SELECT table_catalog, table_schema, table_name, index_schema, index_name
FROM information_schema.statistics WHERE table_catalog IS NOT NULL;
table_catalog table_schema table_name index_schema index_name
+def mysql column_stats mysql PRIMARY
+def mysql column_stats mysql PRIMARY
+def mysql column_stats mysql PRIMARY
def mysql columns_priv mysql PRIMARY
def mysql columns_priv mysql PRIMARY
def mysql columns_priv mysql PRIMARY
@@ -107,6 +110,10 @@ def mysql help_topic mysql PRIMARY
def mysql help_topic mysql name
def mysql host mysql PRIMARY
def mysql host mysql PRIMARY
+def mysql index_stats mysql PRIMARY
+def mysql index_stats mysql PRIMARY
+def mysql index_stats mysql PRIMARY
+def mysql index_stats mysql PRIMARY
def mysql ndb_binlog_index mysql PRIMARY
def mysql plugin mysql PRIMARY
def mysql proc mysql PRIMARY
@@ -124,6 +131,8 @@ def mysql proxies_priv mysql PRIMARY
def mysql proxies_priv mysql PRIMARY
def mysql proxies_priv mysql Grantor
def mysql servers mysql PRIMARY
+def mysql table_stats mysql PRIMARY
+def mysql table_stats mysql PRIMARY
def mysql tables_priv mysql PRIMARY
def mysql tables_priv mysql PRIMARY
def mysql tables_priv mysql PRIMARY
diff --git a/mysql-test/suite/funcs_1/r/is_statistics_mysql.result b/mysql-test/suite/funcs_1/r/is_statistics_mysql.result
index 4c7d58f96f1..202cf9b6d2b 100644
--- a/mysql-test/suite/funcs_1/r/is_statistics_mysql.result
+++ b/mysql-test/suite/funcs_1/r/is_statistics_mysql.result
@@ -12,6 +12,9 @@ def mysql columns_priv 0 mysql PRIMARY 2 Db A #CARD# NULL NULL BTREE
def mysql columns_priv 0 mysql PRIMARY 3 User A #CARD# NULL NULL BTREE
def mysql columns_priv 0 mysql PRIMARY 4 Table_name A #CARD# NULL NULL BTREE
def mysql columns_priv 0 mysql PRIMARY 5 Column_name A #CARD# NULL NULL BTREE
+def mysql column_stats 0 mysql PRIMARY 1 db_name A #CARD# NULL NULL BTREE
+def mysql column_stats 0 mysql PRIMARY 2 table_name A #CARD# NULL NULL BTREE
+def mysql column_stats 0 mysql PRIMARY 3 column_name A #CARD# NULL NULL BTREE
def mysql db 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
def mysql db 0 mysql PRIMARY 2 Db A #CARD# NULL NULL BTREE
def mysql db 0 mysql PRIMARY 3 User A #CARD# NULL NULL BTREE
@@ -29,6 +32,10 @@ def mysql help_topic 0 mysql name 1 name A #CARD# NULL NULL BTREE
def mysql help_topic 0 mysql PRIMARY 1 help_topic_id A #CARD# NULL NULL BTREE
def mysql host 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
def mysql host 0 mysql PRIMARY 2 Db A #CARD# NULL NULL BTREE
+def mysql index_stats 0 mysql PRIMARY 1 db_name A #CARD# NULL NULL BTREE
+def mysql index_stats 0 mysql PRIMARY 2 table_name A #CARD# NULL NULL BTREE
+def mysql index_stats 0 mysql PRIMARY 3 index_name A #CARD# NULL NULL BTREE
+def mysql index_stats 0 mysql PRIMARY 4 prefix_arity A #CARD# NULL NULL BTREE
def mysql ndb_binlog_index 0 mysql PRIMARY 1 epoch A #CARD# NULL NULL BTREE
def mysql plugin 0 mysql PRIMARY 1 name A #CARD# NULL NULL BTREE
def mysql proc 0 mysql PRIMARY 1 db A #CARD# NULL NULL BTREE
@@ -51,6 +58,8 @@ def mysql tables_priv 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
def mysql tables_priv 0 mysql PRIMARY 2 Db A #CARD# NULL NULL BTREE
def mysql tables_priv 0 mysql PRIMARY 3 User A #CARD# NULL NULL BTREE
def mysql tables_priv 0 mysql PRIMARY 4 Table_name A #CARD# NULL NULL BTREE
+def mysql table_stats 0 mysql PRIMARY 1 db_name A #CARD# NULL NULL BTREE
+def mysql table_stats 0 mysql PRIMARY 2 table_name A #CARD# NULL NULL BTREE
def mysql time_zone 0 mysql PRIMARY 1 Time_zone_id A #CARD# NULL NULL BTREE
def mysql time_zone_leap_second 0 mysql PRIMARY 1 Transition_time A #CARD# NULL NULL BTREE
def mysql time_zone_name 0 mysql PRIMARY 1 Name A #CARD# NULL NULL BTREE
diff --git a/mysql-test/suite/funcs_1/r/is_statistics_mysql_embedded.result b/mysql-test/suite/funcs_1/r/is_statistics_mysql_embedded.result
index 9137d70b88c..a17423aad7f 100644
--- a/mysql-test/suite/funcs_1/r/is_statistics_mysql_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_statistics_mysql_embedded.result
@@ -12,6 +12,9 @@ def mysql columns_priv 0 mysql PRIMARY 2 Db A #CARD# NULL NULL BTREE
def mysql columns_priv 0 mysql PRIMARY 3 User A #CARD# NULL NULL BTREE
def mysql columns_priv 0 mysql PRIMARY 4 Table_name A #CARD# NULL NULL BTREE
def mysql columns_priv 0 mysql PRIMARY 5 Column_name A #CARD# NULL NULL BTREE
+def mysql column_stats 0 mysql PRIMARY 1 db_name A #CARD# NULL NULL BTREE
+def mysql column_stats 0 mysql PRIMARY 2 table_name A #CARD# NULL NULL BTREE
+def mysql column_stats 0 mysql PRIMARY 3 column_name A #CARD# NULL NULL BTREE
def mysql db 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
def mysql db 0 mysql PRIMARY 2 Db A #CARD# NULL NULL BTREE
def mysql db 0 mysql PRIMARY 3 User A #CARD# NULL NULL BTREE
@@ -29,6 +32,10 @@ def mysql help_topic 0 mysql name 1 name A #CARD# NULL NULL BTREE
def mysql help_topic 0 mysql PRIMARY 1 help_topic_id A #CARD# NULL NULL BTREE
def mysql host 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
def mysql host 0 mysql PRIMARY 2 Db A #CARD# NULL NULL BTREE
+def mysql index_stats 0 mysql PRIMARY 1 db_name A #CARD# NULL NULL BTREE
+def mysql index_stats 0 mysql PRIMARY 2 table_name A #CARD# NULL NULL BTREE
+def mysql index_stats 0 mysql PRIMARY 3 index_name A #CARD# NULL NULL BTREE
+def mysql index_stats 0 mysql PRIMARY 4 prefix_arity A #CARD# NULL NULL BTREE
def mysql ndb_binlog_index 0 mysql PRIMARY 1 epoch A #CARD# NULL NULL BTREE
def mysql plugin 0 mysql PRIMARY 1 name A #CARD# NULL NULL BTREE
def mysql proc 0 mysql PRIMARY 1 db A #CARD# NULL NULL BTREE
@@ -51,6 +58,8 @@ def mysql tables_priv 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
def mysql tables_priv 0 mysql PRIMARY 2 Db A #CARD# NULL NULL BTREE
def mysql tables_priv 0 mysql PRIMARY 3 User A #CARD# NULL NULL BTREE
def mysql tables_priv 0 mysql PRIMARY 4 Table_name A #CARD# NULL NULL BTREE
+def mysql table_stats 0 mysql PRIMARY 1 db_name A #CARD# NULL NULL BTREE
+def mysql table_stats 0 mysql PRIMARY 2 table_name A #CARD# NULL NULL BTREE
def mysql time_zone 0 mysql PRIMARY 1 Time_zone_id A #CARD# NULL NULL BTREE
def mysql time_zone_leap_second 0 mysql PRIMARY 1 Transition_time A #CARD# NULL NULL BTREE
def mysql time_zone_name 0 mysql PRIMARY 1 Name A #CARD# NULL NULL BTREE
@@ -70,6 +79,9 @@ def mysql columns_priv 0 mysql PRIMARY 2 Db A #CARD# NULL NULL BTREE
def mysql columns_priv 0 mysql PRIMARY 3 User A #CARD# NULL NULL BTREE
def mysql columns_priv 0 mysql PRIMARY 4 Table_name A #CARD# NULL NULL BTREE
def mysql columns_priv 0 mysql PRIMARY 5 Column_name A #CARD# NULL NULL BTREE
+def mysql column_stats 0 mysql PRIMARY 1 db_name A #CARD# NULL NULL BTREE
+def mysql column_stats 0 mysql PRIMARY 2 table_name A #CARD# NULL NULL BTREE
+def mysql column_stats 0 mysql PRIMARY 3 column_name A #CARD# NULL NULL BTREE
def mysql db 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
def mysql db 0 mysql PRIMARY 2 Db A #CARD# NULL NULL BTREE
def mysql db 0 mysql PRIMARY 3 User A #CARD# NULL NULL BTREE
@@ -87,6 +99,10 @@ def mysql help_topic 0 mysql name 1 name A #CARD# NULL NULL BTREE
def mysql help_topic 0 mysql PRIMARY 1 help_topic_id A #CARD# NULL NULL BTREE
def mysql host 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
def mysql host 0 mysql PRIMARY 2 Db A #CARD# NULL NULL BTREE
+def mysql index_stats 0 mysql PRIMARY 1 db_name A #CARD# NULL NULL BTREE
+def mysql index_stats 0 mysql PRIMARY 2 table_name A #CARD# NULL NULL BTREE
+def mysql index_stats 0 mysql PRIMARY 3 index_name A #CARD# NULL NULL BTREE
+def mysql index_stats 0 mysql PRIMARY 4 prefix_arity A #CARD# NULL NULL BTREE
def mysql ndb_binlog_index 0 mysql PRIMARY 1 epoch A #CARD# NULL NULL BTREE
def mysql plugin 0 mysql PRIMARY 1 name A #CARD# NULL NULL BTREE
def mysql proc 0 mysql PRIMARY 1 db A #CARD# NULL NULL BTREE
@@ -109,6 +125,8 @@ def mysql tables_priv 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
def mysql tables_priv 0 mysql PRIMARY 2 Db A #CARD# NULL NULL BTREE
def mysql tables_priv 0 mysql PRIMARY 3 User A #CARD# NULL NULL BTREE
def mysql tables_priv 0 mysql PRIMARY 4 Table_name A #CARD# NULL NULL BTREE
+def mysql table_stats 0 mysql PRIMARY 1 db_name A #CARD# NULL NULL BTREE
+def mysql table_stats 0 mysql PRIMARY 2 table_name A #CARD# NULL NULL BTREE
def mysql time_zone 0 mysql PRIMARY 1 Time_zone_id A #CARD# NULL NULL BTREE
def mysql time_zone_leap_second 0 mysql PRIMARY 1 Transition_time A #CARD# NULL NULL BTREE
def mysql time_zone_name 0 mysql PRIMARY 1 Name A #CARD# NULL NULL BTREE
diff --git a/mysql-test/suite/funcs_1/r/is_table_constraints.result b/mysql-test/suite/funcs_1/r/is_table_constraints.result
index 559a1f1f9f5..f6a1c493e4a 100644
--- a/mysql-test/suite/funcs_1/r/is_table_constraints.result
+++ b/mysql-test/suite/funcs_1/r/is_table_constraints.result
@@ -57,6 +57,7 @@ table_schema, table_name
FROM information_schema.table_constraints
WHERE constraint_catalog IS NOT NULL;
constraint_catalog constraint_schema constraint_name table_schema table_name
+def mysql PRIMARY mysql column_stats
def mysql PRIMARY mysql columns_priv
def mysql PRIMARY mysql db
def mysql PRIMARY mysql event
@@ -69,12 +70,14 @@ def mysql PRIMARY mysql help_relation
def mysql PRIMARY mysql help_topic
def mysql name mysql help_topic
def mysql PRIMARY mysql host
+def mysql PRIMARY mysql index_stats
def mysql PRIMARY mysql ndb_binlog_index
def mysql PRIMARY mysql plugin
def mysql PRIMARY mysql proc
def mysql PRIMARY mysql procs_priv
def mysql PRIMARY mysql proxies_priv
def mysql PRIMARY mysql servers
+def mysql PRIMARY mysql table_stats
def mysql PRIMARY mysql tables_priv
def mysql PRIMARY mysql time_zone
def mysql PRIMARY mysql time_zone_leap_second
diff --git a/mysql-test/suite/funcs_1/r/is_table_constraints_mysql.result b/mysql-test/suite/funcs_1/r/is_table_constraints_mysql.result
index bca333b6387..c778325d5f7 100644
--- a/mysql-test/suite/funcs_1/r/is_table_constraints_mysql.result
+++ b/mysql-test/suite/funcs_1/r/is_table_constraints_mysql.result
@@ -8,6 +8,7 @@ WHERE table_schema = 'mysql'
ORDER BY table_schema,table_name,constraint_name;
CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_SCHEMA TABLE_NAME CONSTRAINT_TYPE
def mysql PRIMARY mysql columns_priv PRIMARY KEY
+def mysql PRIMARY mysql column_stats PRIMARY KEY
def mysql PRIMARY mysql db PRIMARY KEY
def mysql PRIMARY mysql event PRIMARY KEY
def mysql PRIMARY mysql func PRIMARY KEY
@@ -19,6 +20,7 @@ def mysql PRIMARY mysql help_relation PRIMARY KEY
def mysql name mysql help_topic UNIQUE
def mysql PRIMARY mysql help_topic PRIMARY KEY
def mysql PRIMARY mysql host PRIMARY KEY
+def mysql PRIMARY mysql index_stats PRIMARY KEY
def mysql PRIMARY mysql ndb_binlog_index PRIMARY KEY
def mysql PRIMARY mysql plugin PRIMARY KEY
def mysql PRIMARY mysql proc PRIMARY KEY
@@ -26,6 +28,7 @@ def mysql PRIMARY mysql procs_priv PRIMARY KEY
def mysql PRIMARY mysql proxies_priv PRIMARY KEY
def mysql PRIMARY mysql servers PRIMARY KEY
def mysql PRIMARY mysql tables_priv PRIMARY KEY
+def mysql PRIMARY mysql table_stats PRIMARY KEY
def mysql PRIMARY mysql time_zone PRIMARY KEY
def mysql PRIMARY mysql time_zone_leap_second PRIMARY KEY
def mysql PRIMARY mysql time_zone_name PRIMARY KEY
diff --git a/mysql-test/suite/funcs_1/r/is_table_constraints_mysql_embedded.result b/mysql-test/suite/funcs_1/r/is_table_constraints_mysql_embedded.result
index 307357cdd2b..67e27a55cef 100644
--- a/mysql-test/suite/funcs_1/r/is_table_constraints_mysql_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_table_constraints_mysql_embedded.result
@@ -8,6 +8,7 @@ WHERE table_schema = 'mysql'
ORDER BY table_schema,table_name,constraint_name;
CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_SCHEMA TABLE_NAME CONSTRAINT_TYPE
def mysql PRIMARY mysql columns_priv PRIMARY KEY
+def mysql PRIMARY mysql column_stats PRIMARY KEY
def mysql PRIMARY mysql db PRIMARY KEY
def mysql PRIMARY mysql event PRIMARY KEY
def mysql PRIMARY mysql func PRIMARY KEY
@@ -19,6 +20,7 @@ def mysql PRIMARY mysql help_relation PRIMARY KEY
def mysql name mysql help_topic UNIQUE
def mysql PRIMARY mysql help_topic PRIMARY KEY
def mysql PRIMARY mysql host PRIMARY KEY
+def mysql PRIMARY mysql index_stats PRIMARY KEY
def mysql PRIMARY mysql ndb_binlog_index PRIMARY KEY
def mysql PRIMARY mysql plugin PRIMARY KEY
def mysql PRIMARY mysql proc PRIMARY KEY
@@ -26,6 +28,7 @@ def mysql PRIMARY mysql procs_priv PRIMARY KEY
def mysql PRIMARY mysql proxies_priv PRIMARY KEY
def mysql PRIMARY mysql servers PRIMARY KEY
def mysql PRIMARY mysql tables_priv PRIMARY KEY
+def mysql PRIMARY mysql table_stats PRIMARY KEY
def mysql PRIMARY mysql time_zone PRIMARY KEY
def mysql PRIMARY mysql time_zone_leap_second PRIMARY KEY
def mysql PRIMARY mysql time_zone_name PRIMARY KEY
@@ -38,6 +41,7 @@ WHERE table_schema = 'mysql'
ORDER BY table_schema,table_name,constraint_name;
CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_SCHEMA TABLE_NAME CONSTRAINT_TYPE
def mysql PRIMARY mysql columns_priv PRIMARY KEY
+def mysql PRIMARY mysql column_stats PRIMARY KEY
def mysql PRIMARY mysql db PRIMARY KEY
def mysql PRIMARY mysql event PRIMARY KEY
def mysql PRIMARY mysql func PRIMARY KEY
@@ -49,6 +53,7 @@ def mysql PRIMARY mysql help_relation PRIMARY KEY
def mysql name mysql help_topic UNIQUE
def mysql PRIMARY mysql help_topic PRIMARY KEY
def mysql PRIMARY mysql host PRIMARY KEY
+def mysql PRIMARY mysql index_stats PRIMARY KEY
def mysql PRIMARY mysql ndb_binlog_index PRIMARY KEY
def mysql PRIMARY mysql plugin PRIMARY KEY
def mysql PRIMARY mysql proc PRIMARY KEY
@@ -56,6 +61,7 @@ def mysql PRIMARY mysql procs_priv PRIMARY KEY
def mysql PRIMARY mysql proxies_priv PRIMARY KEY
def mysql PRIMARY mysql servers PRIMARY KEY
def mysql PRIMARY mysql tables_priv PRIMARY KEY
+def mysql PRIMARY mysql table_stats PRIMARY KEY
def mysql PRIMARY mysql time_zone PRIMARY KEY
def mysql PRIMARY mysql time_zone_leap_second PRIMARY KEY
def mysql PRIMARY mysql time_zone_name PRIMARY KEY
diff --git a/mysql-test/suite/funcs_1/r/is_tables_mysql.result b/mysql-test/suite/funcs_1/r/is_tables_mysql.result
index 1b317fa5bba..e59c5f04c96 100644
--- a/mysql-test/suite/funcs_1/r/is_tables_mysql.result
+++ b/mysql-test/suite/funcs_1/r/is_tables_mysql.result
@@ -37,6 +37,29 @@ user_comment Column privileges
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
+TABLE_NAME column_stats
+TABLE_TYPE BASE TABLE
+ENGINE MYISAM_OR_MARIA
+VERSION 10
+ROW_FORMAT DYNAMIC_OR_PAGE
+TABLE_ROWS #TBLR#
+AVG_ROW_LENGTH #ARL#
+DATA_LENGTH #DL#
+MAX_DATA_LENGTH #MDL#
+INDEX_LENGTH #IL#
+DATA_FREE #DF#
+AUTO_INCREMENT NULL
+CREATE_TIME #CRT#
+UPDATE_TIME #UT#
+CHECK_TIME #CT#
+TABLE_COLLATION utf8_bin
+CHECKSUM NULL
+CREATE_OPTIONS #CO#
+TABLE_COMMENT #TC#
+user_comment Statistics on Columns
+Separator -----------------------------------------------------
+TABLE_CATALOG def
+TABLE_SCHEMA mysql
TABLE_NAME db
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
@@ -244,6 +267,29 @@ user_comment Host privileges; Merged with database privileges
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
+TABLE_NAME index_stats
+TABLE_TYPE BASE TABLE
+ENGINE MYISAM_OR_MARIA
+VERSION 10
+ROW_FORMAT DYNAMIC_OR_PAGE
+TABLE_ROWS #TBLR#
+AVG_ROW_LENGTH #ARL#
+DATA_LENGTH #DL#
+MAX_DATA_LENGTH #MDL#
+INDEX_LENGTH #IL#
+DATA_FREE #DF#
+AUTO_INCREMENT NULL
+CREATE_TIME #CRT#
+UPDATE_TIME #UT#
+CHECK_TIME #CT#
+TABLE_COLLATION utf8_bin
+CHECKSUM NULL
+CREATE_OPTIONS #CO#
+TABLE_COMMENT #TC#
+user_comment Statistics on Indexes
+Separator -----------------------------------------------------
+TABLE_CATALOG def
+TABLE_SCHEMA mysql
TABLE_NAME ndb_binlog_index
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
@@ -428,6 +474,29 @@ user_comment Table privileges
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
+TABLE_NAME table_stats
+TABLE_TYPE BASE TABLE
+ENGINE MYISAM_OR_MARIA
+VERSION 10
+ROW_FORMAT DYNAMIC_OR_PAGE
+TABLE_ROWS #TBLR#
+AVG_ROW_LENGTH #ARL#
+DATA_LENGTH #DL#
+MAX_DATA_LENGTH #MDL#
+INDEX_LENGTH #IL#
+DATA_FREE #DF#
+AUTO_INCREMENT NULL
+CREATE_TIME #CRT#
+UPDATE_TIME #UT#
+CHECK_TIME #CT#
+TABLE_COLLATION utf8_bin
+CHECKSUM NULL
+CREATE_OPTIONS #CO#
+TABLE_COMMENT #TC#
+user_comment Statistics on Tables
+Separator -----------------------------------------------------
+TABLE_CATALOG def
+TABLE_SCHEMA mysql
TABLE_NAME time_zone
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
diff --git a/mysql-test/suite/funcs_1/r/is_tables_mysql_embedded.result b/mysql-test/suite/funcs_1/r/is_tables_mysql_embedded.result
index e5afebd0de9..2f2367312f6 100644
--- a/mysql-test/suite/funcs_1/r/is_tables_mysql_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_tables_mysql_embedded.result
@@ -37,6 +37,29 @@ user_comment Column privileges
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
+TABLE_NAME column_stats
+TABLE_TYPE BASE TABLE
+ENGINE MYISAM_OR_MARIA
+VERSION 10
+ROW_FORMAT DYNAMIC_OR_PAGE
+TABLE_ROWS #TBLR#
+AVG_ROW_LENGTH #ARL#
+DATA_LENGTH #DL#
+MAX_DATA_LENGTH #MDL#
+INDEX_LENGTH #IL#
+DATA_FREE #DF#
+AUTO_INCREMENT NULL
+CREATE_TIME #CRT#
+UPDATE_TIME #UT#
+CHECK_TIME #CT#
+TABLE_COLLATION utf8_bin
+CHECKSUM NULL
+CREATE_OPTIONS #CO#
+TABLE_COMMENT #TC#
+user_comment Statistics on Columns
+Separator -----------------------------------------------------
+TABLE_CATALOG def
+TABLE_SCHEMA mysql
TABLE_NAME db
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
@@ -244,6 +267,29 @@ user_comment Host privileges; Merged with database privileges
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
+TABLE_NAME index_stats
+TABLE_TYPE BASE TABLE
+ENGINE MYISAM_OR_MARIA
+VERSION 10
+ROW_FORMAT DYNAMIC_OR_PAGE
+TABLE_ROWS #TBLR#
+AVG_ROW_LENGTH #ARL#
+DATA_LENGTH #DL#
+MAX_DATA_LENGTH #MDL#
+INDEX_LENGTH #IL#
+DATA_FREE #DF#
+AUTO_INCREMENT NULL
+CREATE_TIME #CRT#
+UPDATE_TIME #UT#
+CHECK_TIME #CT#
+TABLE_COLLATION utf8_bin
+CHECKSUM NULL
+CREATE_OPTIONS #CO#
+TABLE_COMMENT #TC#
+user_comment Statistics on Indexes
+Separator -----------------------------------------------------
+TABLE_CATALOG def
+TABLE_SCHEMA mysql
TABLE_NAME ndb_binlog_index
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
@@ -428,6 +474,29 @@ user_comment Table privileges
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
+TABLE_NAME table_stats
+TABLE_TYPE BASE TABLE
+ENGINE MYISAM_OR_MARIA
+VERSION 10
+ROW_FORMAT DYNAMIC_OR_PAGE
+TABLE_ROWS #TBLR#
+AVG_ROW_LENGTH #ARL#
+DATA_LENGTH #DL#
+MAX_DATA_LENGTH #MDL#
+INDEX_LENGTH #IL#
+DATA_FREE #DF#
+AUTO_INCREMENT NULL
+CREATE_TIME #CRT#
+UPDATE_TIME #UT#
+CHECK_TIME #CT#
+TABLE_COLLATION utf8_bin
+CHECKSUM NULL
+CREATE_OPTIONS #CO#
+TABLE_COMMENT #TC#
+user_comment Statistics on Tables
+Separator -----------------------------------------------------
+TABLE_CATALOG def
+TABLE_SCHEMA mysql
TABLE_NAME time_zone
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
@@ -605,6 +674,29 @@ user_comment Column privileges
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
+TABLE_NAME column_stats
+TABLE_TYPE BASE TABLE
+ENGINE MYISAM_OR_MARIA
+VERSION 10
+ROW_FORMAT DYNAMIC_OR_PAGE
+TABLE_ROWS #TBLR#
+AVG_ROW_LENGTH #ARL#
+DATA_LENGTH #DL#
+MAX_DATA_LENGTH #MDL#
+INDEX_LENGTH #IL#
+DATA_FREE #DF#
+AUTO_INCREMENT NULL
+CREATE_TIME #CRT#
+UPDATE_TIME #UT#
+CHECK_TIME #CT#
+TABLE_COLLATION utf8_bin
+CHECKSUM NULL
+CREATE_OPTIONS #CO#
+TABLE_COMMENT #TC#
+user_comment Statistics on Columns
+Separator -----------------------------------------------------
+TABLE_CATALOG def
+TABLE_SCHEMA mysql
TABLE_NAME db
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
@@ -812,6 +904,29 @@ user_comment Host privileges; Merged with database privileges
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
+TABLE_NAME index_stats
+TABLE_TYPE BASE TABLE
+ENGINE MYISAM_OR_MARIA
+VERSION 10
+ROW_FORMAT DYNAMIC_OR_PAGE
+TABLE_ROWS #TBLR#
+AVG_ROW_LENGTH #ARL#
+DATA_LENGTH #DL#
+MAX_DATA_LENGTH #MDL#
+INDEX_LENGTH #IL#
+DATA_FREE #DF#
+AUTO_INCREMENT NULL
+CREATE_TIME #CRT#
+UPDATE_TIME #UT#
+CHECK_TIME #CT#
+TABLE_COLLATION utf8_bin
+CHECKSUM NULL
+CREATE_OPTIONS #CO#
+TABLE_COMMENT #TC#
+user_comment Statistics on Indexes
+Separator -----------------------------------------------------
+TABLE_CATALOG def
+TABLE_SCHEMA mysql
TABLE_NAME ndb_binlog_index
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
@@ -996,6 +1111,29 @@ user_comment Table privileges
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
+TABLE_NAME table_stats
+TABLE_TYPE BASE TABLE
+ENGINE MYISAM_OR_MARIA
+VERSION 10
+ROW_FORMAT DYNAMIC_OR_PAGE
+TABLE_ROWS #TBLR#
+AVG_ROW_LENGTH #ARL#
+DATA_LENGTH #DL#
+MAX_DATA_LENGTH #MDL#
+INDEX_LENGTH #IL#
+DATA_FREE #DF#
+AUTO_INCREMENT NULL
+CREATE_TIME #CRT#
+UPDATE_TIME #UT#
+CHECK_TIME #CT#
+TABLE_COLLATION utf8_bin
+CHECKSUM NULL
+CREATE_OPTIONS #CO#
+TABLE_COMMENT #TC#
+user_comment Statistics on Tables
+Separator -----------------------------------------------------
+TABLE_CATALOG def
+TABLE_SCHEMA mysql
TABLE_NAME time_zone
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
diff --git a/mysql-test/suite/funcs_1/r/memory_func_view.result b/mysql-test/suite/funcs_1/r/memory_func_view.result
index 03e4c647d6d..7f7baf65d1f 100644
--- a/mysql-test/suite/funcs_1/r/memory_func_view.result
+++ b/mysql-test/suite/funcs_1/r/memory_func_view.result
@@ -3802,7 +3802,7 @@ NULL NULL 1
00:00:00 2
00:00:00 <--------30 characters-------> 3
-00:00:00 ---äÖüß@µ*$-- 4
-NULL -1 5
+-00:00:01 -1 5
41:58:00 1 17:58 22
Warnings:
Warning 1292 Truncated incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
@@ -3821,7 +3821,7 @@ NULL NULL 1
00:00:00
00:00:00 <--------30 characters-------> 3
-00:00:00 ---äÖüß@µ*$--
-NULL -1
+-00:00:01 -1
41:58:00 1 17:58
Warnings:
Warning 1292 Truncated incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
diff --git a/mysql-test/suite/funcs_1/r/myisam_func_view.result b/mysql-test/suite/funcs_1/r/myisam_func_view.result
index 03e4c647d6d..7f7baf65d1f 100644
--- a/mysql-test/suite/funcs_1/r/myisam_func_view.result
+++ b/mysql-test/suite/funcs_1/r/myisam_func_view.result
@@ -3802,7 +3802,7 @@ NULL NULL 1
00:00:00 2
00:00:00 <--------30 characters-------> 3
-00:00:00 ---äÖüß@µ*$-- 4
-NULL -1 5
+-00:00:01 -1 5
41:58:00 1 17:58 22
Warnings:
Warning 1292 Truncated incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
@@ -3821,7 +3821,7 @@ NULL NULL 1
00:00:00
00:00:00 <--------30 characters-------> 3
-00:00:00 ---äÖüß@µ*$--
-NULL -1
+-00:00:01 -1
41:58:00 1 17:58
Warnings:
Warning 1292 Truncated incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
diff --git a/mysql-test/suite/funcs_1/r/processlist_priv_no_prot.result b/mysql-test/suite/funcs_1/r/processlist_priv_no_prot.result
index 2112da6761e..17c88527959 100644
--- a/mysql-test/suite/funcs_1/r/processlist_priv_no_prot.result
+++ b/mysql-test/suite/funcs_1/r/processlist_priv_no_prot.result
@@ -33,28 +33,30 @@ PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` (
`TIME_MS` decimal(22,3) NOT NULL DEFAULT '0.000',
`STAGE` tinyint(2) NOT NULL DEFAULT '0',
`MAX_STAGE` tinyint(2) NOT NULL DEFAULT '0',
- `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000'
+ `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000',
+ `MEMORY_USED` int(7) NOT NULL DEFAULT '0',
+ `EXAMINED_ROWS` int(7) NOT NULL DEFAULT '0'
) DEFAULT CHARSET=utf8
SHOW processlist;
Id User Host db Command Time State Info Progress
ID root HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
SELECT * FROM processlist ORDER BY id;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID root HOST_NAME information_schema Query TIME executing SELECT * FROM processlist ORDER BY id TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS FROM processlist ORDER BY id;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID root HOST_NAME information_schema Query TIME executing SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS FROM processlist ORDER BY id TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID root HOST_NAME information_schema Query TIME executing SELECT * FROM processlist ORDER BY id TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS FROM processlist ORDER BY id;
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID root HOST_NAME information_schema Query TIME executing SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS FROM processlist ORDER BY id TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
CREATE TEMPORARY TABLE test.t_processlist AS SELECT * FROM processlist;
UPDATE test.t_processlist SET user='horst' WHERE id=1 ;
INSERT INTO processlist SELECT * FROM test.t_processlist;
ERROR 42000: Access denied for user 'root'@'localhost' to database 'information_schema'
DROP TABLE test.t_processlist;
-CREATE VIEW test.v_processlist (ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS) AS SELECT * FROM processlist WITH CHECK OPTION;
+CREATE VIEW test.v_processlist (ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS) AS SELECT * FROM processlist WITH CHECK OPTION;
ERROR HY000: CHECK OPTION on non-updatable view 'test.v_processlist'
-CREATE VIEW test.v_processlist (ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS) AS SELECT * FROM processlist;
+CREATE VIEW test.v_processlist (ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS) AS SELECT * FROM processlist;
DROP VIEW test.v_processlist;
UPDATE processlist SET user='any_user' WHERE id=1 ;
ERROR 42000: Access denied for user 'root'@'localhost' to database 'information_schema'
@@ -106,25 +108,27 @@ PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` (
`TIME_MS` decimal(22,3) NOT NULL DEFAULT '0.000',
`STAGE` tinyint(2) NOT NULL DEFAULT '0',
`MAX_STAGE` tinyint(2) NOT NULL DEFAULT '0',
- `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000'
+ `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000',
+ `MEMORY_USED` int(7) NOT NULL DEFAULT '0',
+ `EXAMINED_ROWS` int(7) NOT NULL DEFAULT '0'
) DEFAULT CHARSET=utf8
SHOW processlist;
Id User Host db Command Time State Info Progress
ID ddicttestuser1 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM processlist ORDER BY id;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT * FROM processlist ORDER BY id TIME_MS 0 0 0.000
-SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS FROM processlist ORDER BY id;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS FROM processlist ORDER BY id TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT * FROM processlist ORDER BY id TIME_MS 0 0 0.000 MEMORY ROWS
+SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS FROM processlist ORDER BY id;
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS FROM processlist ORDER BY id TIME_MS 0 0 0.000 MEMORY ROWS
CREATE TEMPORARY TABLE test.t_processlist AS SELECT * FROM processlist;
UPDATE test.t_processlist SET user='horst' WHERE id=1 ;
INSERT INTO processlist SELECT * FROM test.t_processlist;
ERROR 42000: Access denied for user 'ddicttestuser1'@'localhost' to database 'information_schema'
DROP TABLE test.t_processlist;
-CREATE VIEW test.v_processlist (ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS) AS SELECT * FROM processlist WITH CHECK OPTION;
+CREATE VIEW test.v_processlist (ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS) AS SELECT * FROM processlist WITH CHECK OPTION;
ERROR HY000: CHECK OPTION on non-updatable view 'test.v_processlist'
-CREATE VIEW test.v_processlist (ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS) AS SELECT * FROM processlist;
+CREATE VIEW test.v_processlist (ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS) AS SELECT * FROM processlist;
DROP VIEW test.v_processlist;
UPDATE processlist SET user='any_user' WHERE id=1 ;
ERROR 42000: Access denied for user 'ddicttestuser1'@'localhost' to database 'information_schema'
@@ -175,8 +179,8 @@ SHOW processlist;
Id User Host db Command Time State Info Progress
ID ddicttestuser1 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
4.2 New connection con101 (ddicttestuser1 with PROCESS privilege)
SHOW/SELECT shows all processes/threads.
@@ -190,10 +194,10 @@ ID root HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID root HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID root HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
5 Grant PROCESS privilege to anonymous user.
connection default (user=root)
@@ -214,11 +218,11 @@ ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID root HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID root HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
6 Revoke PROCESS privilege from ddicttestuser1
connection default (user=root)
@@ -238,10 +242,10 @@ ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
7 Revoke PROCESS privilege from anonymous user
connection default (user=root)
@@ -256,9 +260,9 @@ SHOW GRANTS FOR ''@'localhost';
Grants for @localhost
GRANT USAGE ON *.* TO ''@'localhost'
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
8 Grant SUPER (does not imply PROCESS) privilege to ddicttestuser1
connection default (user=root)
@@ -278,11 +282,11 @@ ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
9 Revoke SUPER privilege from user ddicttestuser1
connection default (user=root)
@@ -304,12 +308,12 @@ ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
10 Grant SUPER privilege with grant option to user ddicttestuser1.
connection default (user=root)
@@ -358,18 +362,18 @@ ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser2 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser2 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID root HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser2 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID root HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
11 User ddicttestuser1 revokes PROCESS privilege from user ddicttestuser2
connection ddicttestuser1;
@@ -387,9 +391,9 @@ Id User Host db Command Time State Info Progress
ID ddicttestuser2 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser2 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser2 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser2 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser2 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser2 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
11.2 Revoke SUPER,PROCESS,GRANT OPTION privilege from user ddicttestuser1
connection default (user=root)
@@ -416,15 +420,15 @@ ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
12 Revoke the SELECT privilege from user ddicttestuser1
connection default (user=root)
@@ -452,16 +456,16 @@ ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Query TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
12.2 Revoke only the SELECT privilege on the information_schema from ddicttestuser1.
connection default (user=root)
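The next file also records the schema side of the change: MEMORY_USED and EXAMINED_ROWS are added as int(7) NOT NULL DEFAULT '0'. The definition shown in its hunks is the output of the statement below (the test captures the same output for the temporary PROCESSLIST table):

  SHOW CREATE TABLE information_schema.PROCESSLIST;
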
diff --git a/mysql-test/suite/funcs_1/r/processlist_priv_ps.result b/mysql-test/suite/funcs_1/r/processlist_priv_ps.result
index 421636f3d92..112558c8280 100644
--- a/mysql-test/suite/funcs_1/r/processlist_priv_ps.result
+++ b/mysql-test/suite/funcs_1/r/processlist_priv_ps.result
@@ -33,28 +33,30 @@ PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` (
`TIME_MS` decimal(22,3) NOT NULL DEFAULT '0.000',
`STAGE` tinyint(2) NOT NULL DEFAULT '0',
`MAX_STAGE` tinyint(2) NOT NULL DEFAULT '0',
- `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000'
+ `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000',
+ `MEMORY_USED` int(7) NOT NULL DEFAULT '0',
+ `EXAMINED_ROWS` int(7) NOT NULL DEFAULT '0'
) DEFAULT CHARSET=utf8
SHOW processlist;
Id User Host db Command Time State Info Progress
ID root HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
SELECT * FROM processlist ORDER BY id;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID root HOST_NAME information_schema Execute TIME executing SELECT * FROM processlist ORDER BY id TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS FROM processlist ORDER BY id;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID root HOST_NAME information_schema Execute TIME executing SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS FROM processlist ORDER BY id TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID root HOST_NAME information_schema Execute TIME executing SELECT * FROM processlist ORDER BY id TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS FROM processlist ORDER BY id;
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID root HOST_NAME information_schema Execute TIME executing SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS FROM processlist ORDER BY id TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
CREATE TEMPORARY TABLE test.t_processlist AS SELECT * FROM processlist;
UPDATE test.t_processlist SET user='horst' WHERE id=1 ;
INSERT INTO processlist SELECT * FROM test.t_processlist;
ERROR 42000: Access denied for user 'root'@'localhost' to database 'information_schema'
DROP TABLE test.t_processlist;
-CREATE VIEW test.v_processlist (ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS) AS SELECT * FROM processlist WITH CHECK OPTION;
+CREATE VIEW test.v_processlist (ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS) AS SELECT * FROM processlist WITH CHECK OPTION;
ERROR HY000: CHECK OPTION on non-updatable view 'test.v_processlist'
-CREATE VIEW test.v_processlist (ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS) AS SELECT * FROM processlist;
+CREATE VIEW test.v_processlist (ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS) AS SELECT * FROM processlist;
DROP VIEW test.v_processlist;
UPDATE processlist SET user='any_user' WHERE id=1 ;
ERROR 42000: Access denied for user 'root'@'localhost' to database 'information_schema'
@@ -106,25 +108,27 @@ PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` (
`TIME_MS` decimal(22,3) NOT NULL DEFAULT '0.000',
`STAGE` tinyint(2) NOT NULL DEFAULT '0',
`MAX_STAGE` tinyint(2) NOT NULL DEFAULT '0',
- `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000'
+ `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000',
+ `MEMORY_USED` int(7) NOT NULL DEFAULT '0',
+ `EXAMINED_ROWS` int(7) NOT NULL DEFAULT '0'
) DEFAULT CHARSET=utf8
SHOW processlist;
Id User Host db Command Time State Info Progress
ID ddicttestuser1 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM processlist ORDER BY id;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT * FROM processlist ORDER BY id TIME_MS 0 0 0.000
-SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS FROM processlist ORDER BY id;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS FROM processlist ORDER BY id TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT * FROM processlist ORDER BY id TIME_MS 0 0 0.000 MEMORY ROWS
+SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS FROM processlist ORDER BY id;
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS FROM processlist ORDER BY id TIME_MS 0 0 0.000 MEMORY ROWS
CREATE TEMPORARY TABLE test.t_processlist AS SELECT * FROM processlist;
UPDATE test.t_processlist SET user='horst' WHERE id=1 ;
INSERT INTO processlist SELECT * FROM test.t_processlist;
ERROR 42000: Access denied for user 'ddicttestuser1'@'localhost' to database 'information_schema'
DROP TABLE test.t_processlist;
-CREATE VIEW test.v_processlist (ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS) AS SELECT * FROM processlist WITH CHECK OPTION;
+CREATE VIEW test.v_processlist (ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS) AS SELECT * FROM processlist WITH CHECK OPTION;
ERROR HY000: CHECK OPTION on non-updatable view 'test.v_processlist'
-CREATE VIEW test.v_processlist (ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS) AS SELECT * FROM processlist;
+CREATE VIEW test.v_processlist (ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO, TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS) AS SELECT * FROM processlist;
DROP VIEW test.v_processlist;
UPDATE processlist SET user='any_user' WHERE id=1 ;
ERROR 42000: Access denied for user 'ddicttestuser1'@'localhost' to database 'information_schema'
@@ -175,8 +179,8 @@ SHOW processlist;
Id User Host db Command Time State Info Progress
ID ddicttestuser1 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
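Note the asymmetry visible in these hunks: the SHOW processlist output is untouched (its header still ends with Progress), while only the INFORMATION_SCHEMA table grows. A sketch of the contrast; the comments are editorial, not test output:

  SHOW FULL PROCESSLIST;                          -- column set unchanged
  SELECT * FROM information_schema.PROCESSLIST;   -- now ends with MEMORY_USED, EXAMINED_ROWS
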
4.2 New connection con101 (ddicttestuser1 with PROCESS privilege)
SHOW/SELECT shows all processes/threads.
@@ -190,10 +194,10 @@ ID root HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID root HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID root HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
5 Grant PROCESS privilege to anonymous user.
connection default (user=root)
@@ -214,11 +218,11 @@ ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID root HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID root HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
6 Revoke PROCESS privilege from ddicttestuser1
connection default (user=root)
@@ -238,10 +242,10 @@ ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
7 Revoke PROCESS privilege from anonymous user
connection default (user=root)
@@ -256,9 +260,9 @@ SHOW GRANTS FOR ''@'localhost';
Grants for @localhost
GRANT USAGE ON *.* TO ''@'localhost'
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
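The privilege behaviour itself is unchanged by this patch: PROCESS, not SUPER, controls whether a user sees other users' threads, and a user without it sees only its own rows, new columns included. The grants exercised by the surrounding sections look like the following sketch; the user and host names are the test's own:

  GRANT PROCESS ON *.* TO 'ddicttestuser1'@'localhost';
  REVOKE PROCESS ON *.* FROM 'ddicttestuser1'@'localhost';
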
8 Grant SUPER (does not imply PROCESS) privilege to ddicttestuser1
connection default (user=root)
@@ -278,11 +282,11 @@ ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
9 Revoke SUPER privilege from user ddicttestuser1
connection default (user=root)
@@ -304,12 +308,12 @@ ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
10 Grant SUPER privilege with grant option to user ddicttestuser1.
connection default (user=root)
@@ -358,18 +362,18 @@ ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser2 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser2 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID root HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser2 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID root HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
11 User ddicttestuser1 revokes PROCESS privilege from user ddicttestuser2
connection ddicttestuser1;
@@ -387,9 +391,9 @@ Id User Host db Command Time State Info Progress
ID ddicttestuser2 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser2 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser2 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser2 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser2 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser2 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
11.2 Revoke SUPER,PROCESS,GRANT OPTION privilege from user ddicttestuser1
connection default (user=root)
@@ -416,15 +420,15 @@ ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
12 Revoke the SELECT privilege from user ddicttestuser1
connection default (user=root)
@@ -452,16 +456,16 @@ ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS
ID ddicttestuser1 HOST_NAME information_schema Query TIME NULL SHOW processlist TIME_MS
SELECT * FROM information_schema.processlist;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
-ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+ID ddicttestuser1 HOST_NAME information_schema Execute TIME executing SELECT * FROM information_schema.processlist TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
+ID ddicttestuser1 HOST_NAME information_schema Sleep TIME NULL TIME_MS 0 0 0.000 MEMORY ROWS
####################################################################################
12.2 Revoke only the SELECT privilege on the information_schema from ddicttestuser1.
connection default (user=root)
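The view tests above extend their explicit column lists to all 14 columns. A standalone version of the statement from those hunks; here PROCESSLIST is schema-qualified, since the test itself runs with information_schema as the default database:

  CREATE VIEW test.v_processlist
    (ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO,
     TIME_MS, STAGE, MAX_STAGE, PROGRESS, MEMORY_USED, EXAMINED_ROWS)
  AS SELECT * FROM information_schema.PROCESSLIST;
  DROP VIEW test.v_processlist;
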
diff --git a/mysql-test/suite/funcs_1/r/processlist_val_no_prot.result b/mysql-test/suite/funcs_1/r/processlist_val_no_prot.result
index 0893fa3ca20..9560f77e0da 100644
--- a/mysql-test/suite/funcs_1/r/processlist_val_no_prot.result
+++ b/mysql-test/suite/funcs_1/r/processlist_val_no_prot.result
@@ -23,14 +23,16 @@ PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` (
`TIME_MS` decimal(22,3) NOT NULL DEFAULT '0.000',
`STAGE` tinyint(2) NOT NULL DEFAULT '0',
`MAX_STAGE` tinyint(2) NOT NULL DEFAULT '0',
- `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000'
+ `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000',
+ `MEMORY_USED` int(7) NOT NULL DEFAULT '0',
+ `EXAMINED_ROWS` int(7) NOT NULL DEFAULT '0'
) DEFAULT CHARSET=utf8
# Ensure that the information about the own connection is correct.
#--------------------------------------------------------------------------
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-<ID> root <HOST_NAME> test Query <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+<ID> root <HOST_NAME> test Query <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
SHOW FULL PROCESSLIST;
Id User Host db Command Time State Info Progress
<ID> root <HOST_NAME> test Query <TIME> NULL SHOW FULL PROCESSLIST <TIME_MS>
@@ -76,9 +78,9 @@ Has TIME a reasonable value?
# Poll till the connection con1 is in state COMMAND = 'Sleep'.
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL <TIME_MS> 0 0 0.000
-<ID> root <HOST_NAME> information_schema Query <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
+<ID> root <HOST_NAME> information_schema Query <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
SHOW FULL PROCESSLIST;
Id User Host db Command Time State Info Progress
<ID> root <HOST_NAME> information_schema Query <TIME> NULL SHOW FULL PROCESSLIST 0.000
@@ -110,8 +112,8 @@ Expect 1
# ----- switch to connection con1 (user = test_user) -----
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-<ID> test_user <HOST_NAME> information_schema Query <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+<ID> test_user <HOST_NAME> information_schema Query <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
SHOW FULL PROCESSLIST;
Id User Host db Command Time State Info Progress
<ID> test_user <HOST_NAME> information_schema Query <TIME> NULL SHOW FULL PROCESSLIST 0.000
@@ -127,9 +129,9 @@ Id User Host db Command Time State Info Progress
# ----- switch to connection con2 (user = test_user) -----
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-<ID> test_user <HOST_NAME> information_schema Query <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000
-<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL <TIME_MS> 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+<ID> test_user <HOST_NAME> information_schema Query <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
+<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
SHOW FULL PROCESSLIST;
Id User Host db Command Time State Info Progress
<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL 0.000
@@ -153,10 +155,10 @@ SELECT sleep(10), 17;
# Poll till connection con2 is in state 'User sleep'.
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-<ID> test_user <HOST_NAME> information_schema Query <TIME> User sleep SELECT sleep(10), 17 <TIME_MS> 0 0 0.000
-<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL <TIME_MS> 0 0 0.000
-<ID> root <HOST_NAME> information_schema Query <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+<ID> test_user <HOST_NAME> information_schema Query <TIME> User sleep SELECT sleep(10), 17 <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
+<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
+<ID> root <HOST_NAME> information_schema Query <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
SHOW FULL PROCESSLIST;
Id User Host db Command Time State Info Progress
<ID> root <HOST_NAME> information_schema Query <TIME> NULL SHOW FULL PROCESSLIST 0.000
@@ -200,10 +202,10 @@ SELECT COUNT(*) FROM test.t1;
# Poll till INFO is no more NULL and State = 'Waiting for table metadata lock'.
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-<ID> test_user <HOST_NAME> information_schema Query <TIME> Waiting for table metadata lock SELECT COUNT(*) FROM test.t1 <TIME_MS> 0 0 0.000
-<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL <TIME_MS> 0 0 0.000
-<ID> root <HOST_NAME> information_schema Query <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+<ID> test_user <HOST_NAME> information_schema Query <TIME> Waiting for table metadata lock SELECT COUNT(*) FROM test.t1 <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
+<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
+<ID> root <HOST_NAME> information_schema Query <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
UNLOCK TABLES;
# ----- switch to connection con2 (user = test_user) -----
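As in the hunk above, the val test polls INFORMATION_SCHEMA.PROCESSLIST until a connection reaches the expected state. A minimal sketch of such a poll condition; the state string is copied from the result, the COUNT(*) framing is illustrative:

  SELECT COUNT(*) FROM information_schema.PROCESSLIST
  WHERE STATE = 'Waiting for table metadata lock';
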
@@ -234,10 +236,10 @@ SELECT count(*),'BEGIN-This is the representative of a very long statement.This
# SHOW PROCESSLIST statement truncated after 100 char
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-<ID> test_user <HOST_NAME> information_schema <COMMAND> <TIME> <STATE> SELECT count(*),'BEGIN-This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative 
of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long 
statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is 
the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative 
of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long 
statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.-END' AS "Long string" FROM test.t1 <TIME_MS> 0 0 0.000
-<ID> test_user <HOST_NAME> information_schema <COMMAND> <TIME> <STATE> NULL <TIME_MS> 0 0 0.000
-<ID> root <HOST_NAME> information_schema <COMMAND> <TIME> <STATE> SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+<ID> test_user <HOST_NAME> information_schema <COMMAND> <TIME> <STATE> SELECT count(*),'BEGIN-This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative 
of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long 
statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is 
the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative 
of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long 
statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.-END' AS "Long string" FROM test.t1 <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
+<ID> test_user <HOST_NAME> information_schema <COMMAND> <TIME> <STATE> NULL <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
+<ID> root <HOST_NAME> information_schema <COMMAND> <TIME> <STATE> SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
SHOW FULL PROCESSLIST;
Id User Host db Command Time State Info Progress
<ID> root <HOST_NAME> information_schema <COMMAND> <TIME> <STATE> SHOW FULL PROCESSLIST 0.000
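The hunks above reflect the two columns this change adds to INFORMATION_SCHEMA.PROCESSLIST, MEMORY_USED and EXAMINED_ROWS. As a minimal sketch (the WHERE filter is illustrative, not taken from the test), the extended table can be queried directly:

    SELECT ID, USER, COMMAND, MEMORY_USED, EXAMINED_ROWS
    FROM INFORMATION_SCHEMA.PROCESSLIST
    WHERE USER = 'test_user';

As the unchanged "Id User Host db Command Time State Info Progress" headers show, SHOW FULL PROCESSLIST keeps its original column set, so the per-thread memory and examined-row counters are visible only through the table.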
diff --git a/mysql-test/suite/funcs_1/r/processlist_val_ps.result b/mysql-test/suite/funcs_1/r/processlist_val_ps.result
index b64afa84279..aa24a46b718 100644
--- a/mysql-test/suite/funcs_1/r/processlist_val_ps.result
+++ b/mysql-test/suite/funcs_1/r/processlist_val_ps.result
@@ -23,14 +23,16 @@ PROCESSLIST CREATE TEMPORARY TABLE `PROCESSLIST` (
`TIME_MS` decimal(22,3) NOT NULL DEFAULT '0.000',
`STAGE` tinyint(2) NOT NULL DEFAULT '0',
`MAX_STAGE` tinyint(2) NOT NULL DEFAULT '0',
- `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000'
+ `PROGRESS` decimal(7,3) NOT NULL DEFAULT '0.000',
+ `MEMORY_USED` int(7) NOT NULL DEFAULT '0',
+ `EXAMINED_ROWS` int(7) NOT NULL DEFAULT '0'
) DEFAULT CHARSET=utf8
# Ensure that the information about the own connection is correct.
#--------------------------------------------------------------------------
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-<ID> root <HOST_NAME> test Execute <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+<ID> root <HOST_NAME> test Execute <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
SHOW FULL PROCESSLIST;
Id User Host db Command Time State Info Progress
<ID> root <HOST_NAME> test Query <TIME> NULL SHOW FULL PROCESSLIST <TIME_MS>
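Per the CREATE TABLE hunk above, both new columns are declared int(7) NOT NULL DEFAULT '0'. A small sketch for verifying those definitions through INFORMATION_SCHEMA.COLUMNS, a standard way to inspect them (this query is not part of the test itself):

    SELECT COLUMN_NAME, COLUMN_TYPE, COLUMN_DEFAULT
    FROM INFORMATION_SCHEMA.COLUMNS
    WHERE TABLE_SCHEMA = 'information_schema'
      AND TABLE_NAME = 'PROCESSLIST'
      AND COLUMN_NAME IN ('MEMORY_USED', 'EXAMINED_ROWS');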
@@ -76,9 +78,9 @@ Has TIME a reasonable value?
# Poll till the connection con1 is in state COMMAND = 'Sleep'.
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL <TIME_MS> 0 0 0.000
-<ID> root <HOST_NAME> information_schema Execute <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
+<ID> root <HOST_NAME> information_schema Execute <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
SHOW FULL PROCESSLIST;
Id User Host db Command Time State Info Progress
<ID> root <HOST_NAME> information_schema Query <TIME> NULL SHOW FULL PROCESSLIST 0.000
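The "Poll till ..." comments mark points where the driving .test file waits for a connection to reach a given state before taking the snapshot recorded here. A sketch of the usual mysqltest idiom for this, using the standard include/wait_condition.inc helper (the exact condition shown is an assumption):

    let $wait_condition= SELECT COUNT(*) = 1
      FROM INFORMATION_SCHEMA.PROCESSLIST
      WHERE USER = 'test_user' AND COMMAND = 'Sleep';
    --source include/wait_condition.inc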
@@ -110,8 +112,8 @@ Expect 1
# ----- switch to connection con1 (user = test_user) -----
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-<ID> test_user <HOST_NAME> information_schema Execute <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+<ID> test_user <HOST_NAME> information_schema Execute <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
SHOW FULL PROCESSLIST;
Id User Host db Command Time State Info Progress
<ID> test_user <HOST_NAME> information_schema Query <TIME> NULL SHOW FULL PROCESSLIST 0.000
@@ -127,9 +129,9 @@ Id User Host db Command Time State Info Progress
# ----- switch to connection con2 (user = test_user) -----
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-<ID> test_user <HOST_NAME> information_schema Execute <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000
-<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL <TIME_MS> 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+<ID> test_user <HOST_NAME> information_schema Execute <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
+<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
SHOW FULL PROCESSLIST;
Id User Host db Command Time State Info Progress
<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL 0.000
@@ -153,10 +155,10 @@ SELECT sleep(10), 17;
# Poll till connection con2 is in state 'User sleep'.
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-<ID> test_user <HOST_NAME> information_schema Query <TIME> User sleep SELECT sleep(10), 17 <TIME_MS> 0 0 0.000
-<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL <TIME_MS> 0 0 0.000
-<ID> root <HOST_NAME> information_schema Execute <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+<ID> test_user <HOST_NAME> information_schema Query <TIME> User sleep SELECT sleep(10), 17 <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
+<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
+<ID> root <HOST_NAME> information_schema Execute <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
SHOW FULL PROCESSLIST;
Id User Host db Command Time State Info Progress
<ID> root <HOST_NAME> information_schema Query <TIME> NULL SHOW FULL PROCESSLIST 0.000
@@ -200,10 +202,10 @@ SELECT COUNT(*) FROM test.t1;
# Poll till INFO is no more NULL and State = 'Waiting for table metadata lock'.
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-<ID> test_user <HOST_NAME> information_schema Query <TIME> Waiting for table metadata lock SELECT COUNT(*) FROM test.t1 <TIME_MS> 0 0 0.000
-<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL <TIME_MS> 0 0 0.000
-<ID> root <HOST_NAME> information_schema Execute <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+<ID> test_user <HOST_NAME> information_schema Query <TIME> Waiting for table metadata lock SELECT COUNT(*) FROM test.t1 <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
+<ID> test_user <HOST_NAME> information_schema Sleep <TIME> NULL <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
+<ID> root <HOST_NAME> information_schema Execute <TIME> executing SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
UNLOCK TABLES;
# ----- switch to connection con2 (user = test_user) -----
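The <MEMORY> and <ROWS> tokens in the expected output stand in for values that vary between runs and are masked by the test driver. A hedged sketch of how a .test file typically masks such columns with --replace_column (positions 13 and 14 are inferred from the header line above; the remaining substitutions are assumptions):

    --replace_column 1 <ID> 3 <HOST_NAME> 6 <TIME> 9 <TIME_MS> 13 <MEMORY> 14 <ROWS>
    SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;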
@@ -234,10 +236,10 @@ SELECT count(*),'BEGIN-This is the representative of a very long statement.This
# SHOW PROCESSLIST statement truncated after 100 char
SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST;
-ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS
-<ID> test_user <HOST_NAME> information_schema <COMMAND> <TIME> <STATE> SELECT count(*),'BEGIN-This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative 
of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long 
statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is 
the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative 
of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long 
statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.-END' AS "Long string" FROM test.t1 <TIME_MS> 0 0 0.000
-<ID> test_user <HOST_NAME> information_schema <COMMAND> <TIME> <STATE> NULL <TIME_MS> 0 0 0.000
-<ID> root <HOST_NAME> information_schema <COMMAND> <TIME> <STATE> SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000
+ID USER HOST DB COMMAND TIME STATE INFO TIME_MS STAGE MAX_STAGE PROGRESS MEMORY_USED EXAMINED_ROWS
+<ID> test_user <HOST_NAME> information_schema <COMMAND> <TIME> <STATE> SELECT count(*),'BEGIN-This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative 
of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long 
statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is 
the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative 
of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long 
statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.This is the representative of a very long statement.-END' AS "Long string" FROM test.t1 <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
+<ID> test_user <HOST_NAME> information_schema <COMMAND> <TIME> <STATE> NULL <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
+<ID> root <HOST_NAME> information_schema <COMMAND> <TIME> <STATE> SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST <TIME_MS> 0 0 0.000 <MEMORY> <ROWS>
SHOW FULL PROCESSLIST;
Id User Host db Command Time State Info Progress
<ID> root <HOST_NAME> information_schema <COMMAND> <TIME> <STATE> SHOW FULL PROCESSLIST 0.000
diff --git a/mysql-test/suite/innodb/r/binlog_consistent.result b/mysql-test/suite/innodb/r/binlog_consistent.result
index c07719da297..df22d634339 100644
--- a/mysql-test/suite/innodb/r/binlog_consistent.result
+++ b/mysql-test/suite/innodb/r/binlog_consistent.result
@@ -3,11 +3,11 @@ RESET MASTER;
CREATE TABLE t1 (a INT, b VARCHAR(100), PRIMARY KEY (a,b)) ENGINE=innodb;
SHOW MASTER STATUS;
File Position Binlog_Do_DB Binlog_Ignore_DB
-master-bin.000001 380
+master-bin.000001 421
SHOW STATUS LIKE 'binlog_snapshot_%';
Variable_name Value
Binlog_snapshot_file master-bin.000001
-Binlog_snapshot_position 380
+Binlog_snapshot_position 421
BEGIN;
INSERT INTO t1 VALUES (0, "");
# Connection con1
@@ -38,10 +38,10 @@ a b
SHOW STATUS LIKE 'binlog_snapshot_%';
Variable_name Value
Binlog_snapshot_file master-bin.000001
-Binlog_snapshot_position 904
+Binlog_snapshot_position 945
SHOW MASTER STATUS;
File Position Binlog_Do_DB Binlog_Ignore_DB
-master-bin.000001 1316
+master-bin.000001 1357
SELECT * FROM t2 ORDER BY a;
a
2
@@ -60,44 +60,45 @@ a b
SHOW STATUS LIKE 'binlog_snapshot_%';
Variable_name Value
Binlog_snapshot_file master-bin.000001
-Binlog_snapshot_position 904
+Binlog_snapshot_position 945
SHOW MASTER STATUS;
File Position Binlog_Do_DB Binlog_Ignore_DB
-master-bin.000002 245
+master-bin.000002 326
COMMIT;
SHOW STATUS LIKE 'binlog_snapshot_%';
Variable_name Value
Binlog_snapshot_file master-bin.000002
-Binlog_snapshot_position 245
+Binlog_snapshot_position 326
SHOW MASTER STATUS;
File Position Binlog_Do_DB Binlog_Ignore_DB
-master-bin.000002 245
+master-bin.000002 326
SHOW BINLOG EVENTS;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 4 Format_desc 1 245 Server ver: #, Binlog ver: #
-master-bin.000001 245 Query 1 380 use `test`; CREATE TABLE t1 (a INT, b VARCHAR(100), PRIMARY KEY (a,b)) ENGINE=innodb
-master-bin.000001 380 Query 1 492 use `test`; CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=myisam
-master-bin.000001 492 Query 1 560 BEGIN
-master-bin.000001 560 Query 1 648 use `test`; INSERT INTO t2 VALUES (2)
-master-bin.000001 648 Query 1 717 COMMIT
-master-bin.000001 717 Query 1 785 BEGIN
-master-bin.000001 785 Query 1 877 use `test`; INSERT INTO t1 VALUES (0, "")
-master-bin.000001 877 Xid 1 904 COMMIT /* XID */
-master-bin.000001 904 Query 1 972 BEGIN
-master-bin.000001 972 Query 1 1060 use `test`; INSERT INTO t2 VALUES (3)
-master-bin.000001 1060 Query 1 1129 COMMIT
-master-bin.000001 1129 Query 1 1197 BEGIN
-master-bin.000001 1197 Query 1 1289 use `test`; INSERT INTO t1 VALUES (4, "")
-master-bin.000001 1289 Xid 1 1316 COMMIT /* XID */
-master-bin.000001 1316 Query 1 1384 BEGIN
-master-bin.000001 1384 Query 1 1476 use `test`; INSERT INTO t1 VALUES (1, "")
-master-bin.000001 1476 Xid 1 1503 COMMIT /* XID */
-master-bin.000001 1503 Query 1 1571 BEGIN
-master-bin.000001 1571 Query 1 1668 use `test`; INSERT INTO t1 VALUES (2, "first")
-master-bin.000001 1668 Query 1 1766 use `test`; INSERT INTO t1 VALUES (2, "second")
-master-bin.000001 1766 Xid 1 1793 COMMIT /* XID */
-master-bin.000001 1793 Query 1 1861 BEGIN
-master-bin.000001 1861 Query 1 1953 use `test`; INSERT INTO t1 VALUES (3, "")
-master-bin.000001 1953 Xid 1 1980 COMMIT /* XID */
-master-bin.000001 1980 Rotate 1 2024 master-bin.000002;pos=4
+master-bin.000001 4 Format_desc 1 246 Server ver: #, Binlog ver: #
+master-bin.000001 246 Binlog_checkpoint 1 286 master-bin.000001
+master-bin.000001 286 Query 1 421 use `test`; CREATE TABLE t1 (a INT, b VARCHAR(100), PRIMARY KEY (a,b)) ENGINE=innodb
+master-bin.000001 421 Query 1 533 use `test`; CREATE TABLE t2 (a INT PRIMARY KEY) ENGINE=myisam
+master-bin.000001 533 Query 1 601 BEGIN
+master-bin.000001 601 Query 1 689 use `test`; INSERT INTO t2 VALUES (2)
+master-bin.000001 689 Query 1 758 COMMIT
+master-bin.000001 758 Query 1 826 BEGIN
+master-bin.000001 826 Query 1 918 use `test`; INSERT INTO t1 VALUES (0, "")
+master-bin.000001 918 Xid 1 945 COMMIT /* XID */
+master-bin.000001 945 Query 1 1013 BEGIN
+master-bin.000001 1013 Query 1 1101 use `test`; INSERT INTO t2 VALUES (3)
+master-bin.000001 1101 Query 1 1170 COMMIT
+master-bin.000001 1170 Query 1 1238 BEGIN
+master-bin.000001 1238 Query 1 1330 use `test`; INSERT INTO t1 VALUES (4, "")
+master-bin.000001 1330 Xid 1 1357 COMMIT /* XID */
+master-bin.000001 1357 Query 1 1425 BEGIN
+master-bin.000001 1425 Query 1 1517 use `test`; INSERT INTO t1 VALUES (1, "")
+master-bin.000001 1517 Xid 1 1544 COMMIT /* XID */
+master-bin.000001 1544 Query 1 1612 BEGIN
+master-bin.000001 1612 Query 1 1709 use `test`; INSERT INTO t1 VALUES (2, "first")
+master-bin.000001 1709 Query 1 1807 use `test`; INSERT INTO t1 VALUES (2, "second")
+master-bin.000001 1807 Xid 1 1834 COMMIT /* XID */
+master-bin.000001 1834 Query 1 1902 BEGIN
+master-bin.000001 1902 Query 1 1994 use `test`; INSERT INTO t1 VALUES (3, "")
+master-bin.000001 1994 Xid 1 2021 COMMIT /* XID */
+master-bin.000001 2021 Rotate 1 2065 master-bin.000002;pos=4
DROP TABLE t1,t2;
diff --git a/mysql-test/suite/innodb/r/group_commit_binlog_pos.result b/mysql-test/suite/innodb/r/group_commit_binlog_pos.result
index d28ad1fd70e..ccf458809d8 100644
--- a/mysql-test/suite/innodb/r/group_commit_binlog_pos.result
+++ b/mysql-test/suite/innodb/r/group_commit_binlog_pos.result
@@ -1,3 +1,4 @@
+SET GLOBAL innodb_flush_log_at_trx_commit=3;
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=innodb;
INSERT INTO t1 VALUES (0);
SET DEBUG_SYNC= "commit_after_get_LOCK_log SIGNAL con1_waiting WAIT_FOR con3_queued";
@@ -30,6 +31,6 @@ a
1
2
3
-InnoDB: Last MySQL binlog file position 0 906, file name ./master-bin.000001
+InnoDB: Last MySQL binlog file position 0 947, file name ./master-bin.000001
SET DEBUG_SYNC= 'RESET';
DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/group_commit_binlog_pos_no_optimize_thread.result b/mysql-test/suite/innodb/r/group_commit_binlog_pos_no_optimize_thread.result
index da8cff142b8..44cf2f3979d 100644
--- a/mysql-test/suite/innodb/r/group_commit_binlog_pos_no_optimize_thread.result
+++ b/mysql-test/suite/innodb/r/group_commit_binlog_pos_no_optimize_thread.result
@@ -1,3 +1,4 @@
+SET GLOBAL innodb_flush_log_at_trx_commit=3;
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=innodb;
INSERT INTO t1 VALUES (0);
SET DEBUG_SYNC= "commit_after_get_LOCK_log SIGNAL con1_waiting WAIT_FOR con3_queued";
@@ -31,6 +32,6 @@ a
1
2
3
-InnoDB: Last MySQL binlog file position 0 906, file name ./master-bin.000001
+InnoDB: Last MySQL binlog file position 0 947, file name ./master-bin.000001
SET DEBUG_SYNC= 'RESET';
DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/group_commit_crash.result b/mysql-test/suite/innodb/r/group_commit_crash.result
index cd47ba62ff2..c084f854e79 100644
--- a/mysql-test/suite/innodb/r/group_commit_crash.result
+++ b/mysql-test/suite/innodb/r/group_commit_crash.result
@@ -36,7 +36,7 @@ COMMIT;
Got one of the listed errors
SELECT * FROM t1 ORDER BY id;
a b c d id
-SHOW BINLOG EVENTS LIMIT 2,1;
+SHOW BINLOG EVENTS LIMIT 3,1;
Log_name Pos Event_type Server_id End_log_pos Info
delete from t1;
SET binlog_format= mixed;
@@ -58,7 +58,7 @@ a b c d 7
a b c d 8
a b c d 9
a b c d 10
-SHOW BINLOG EVENTS LIMIT 2,1;
+SHOW BINLOG EVENTS LIMIT 3,1;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query 1 # use `test`; insert into t1 select * from t2
delete from t1;
@@ -81,7 +81,7 @@ a b c d 7
a b c d 8
a b c d 9
a b c d 10
-SHOW BINLOG EVENTS LIMIT 2,1;
+SHOW BINLOG EVENTS LIMIT 3,1;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query 1 # use `test`; insert into t1 select * from t2
delete from t1;
@@ -104,7 +104,7 @@ a b c d 7
a b c d 8
a b c d 9
a b c d 10
-SHOW BINLOG EVENTS LIMIT 2,1;
+SHOW BINLOG EVENTS LIMIT 3,1;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query 1 # use `test`; insert into t1 select * from t2
delete from t1;
@@ -117,7 +117,7 @@ COMMIT;
Got one of the listed errors
SELECT * FROM t1 ORDER BY id;
a b c d id
-SHOW BINLOG EVENTS LIMIT 2,1;
+SHOW BINLOG EVENTS LIMIT 3,1;
Log_name Pos Event_type Server_id End_log_pos Info
delete from t1;
DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/group_commit_crash_no_optimize_thread.result b/mysql-test/suite/innodb/r/group_commit_crash_no_optimize_thread.result
index c1ae510d45b..40c270a76d3 100644
--- a/mysql-test/suite/innodb/r/group_commit_crash_no_optimize_thread.result
+++ b/mysql-test/suite/innodb/r/group_commit_crash_no_optimize_thread.result
@@ -36,7 +36,7 @@ COMMIT;
Got one of the listed errors
SELECT * FROM t1 ORDER BY id;
a b c d id
-SHOW BINLOG EVENTS LIMIT 2,1;
+SHOW BINLOG EVENTS LIMIT 3,1;
Log_name Pos Event_type Server_id End_log_pos Info
delete from t1;
SET binlog_format= mixed;
@@ -58,7 +58,7 @@ a b c d 7
a b c d 8
a b c d 9
a b c d 10
-SHOW BINLOG EVENTS LIMIT 2,1;
+SHOW BINLOG EVENTS LIMIT 3,1;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query 1 # use `test`; insert into t1 select * from t2
delete from t1;
@@ -81,7 +81,7 @@ a b c d 7
a b c d 8
a b c d 9
a b c d 10
-SHOW BINLOG EVENTS LIMIT 2,1;
+SHOW BINLOG EVENTS LIMIT 3,1;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query 1 # use `test`; insert into t1 select * from t2
delete from t1;
@@ -104,7 +104,7 @@ a b c d 7
a b c d 8
a b c d 9
a b c d 10
-SHOW BINLOG EVENTS LIMIT 2,1;
+SHOW BINLOG EVENTS LIMIT 3,1;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query 1 # use `test`; insert into t1 select * from t2
delete from t1;
@@ -117,7 +117,7 @@ COMMIT;
Got one of the listed errors
SELECT * FROM t1 ORDER BY id;
a b c d id
-SHOW BINLOG EVENTS LIMIT 2,1;
+SHOW BINLOG EVENTS LIMIT 3,1;
Log_name Pos Event_type Server_id End_log_pos Info
delete from t1;
DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/innodb-create-options.result b/mysql-test/suite/innodb/r/innodb-create-options.result
index 94c84fcf60f..c73baa60d42 100644
--- a/mysql-test/suite/innodb/r/innodb-create-options.result
+++ b/mysql-test/suite/innodb/r/innodb-create-options.result
@@ -10,11 +10,11 @@ Note 1051 Unknown table 't1'
# 'FIXED' is sent to InnoDB since it is used by MyISAM.
# But it is an invalid mode in InnoDB
CREATE TABLE t1 ( i INT ) ROW_FORMAT=FIXED;
-ERROR HY000: Can't create table 'test.t1' (errno: 1478)
+ERROR HY000: Can't create table 'test.t1' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: invalid ROW_FORMAT specifier.
-Error 1005 Can't create table 'test.t1' (errno: 1478)
+Warning 140 InnoDB: invalid ROW_FORMAT specifier.
+Error 1005 Can't create table 'test.t1' (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=0;
SHOW WARNINGS;
Level Code Message
@@ -46,11 +46,11 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact
ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=0;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: invalid ROW_FORMAT specifier.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: invalid ROW_FORMAT specifier.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact
@@ -58,23 +58,23 @@ t1 Compact
# KEY_BLOCK_SIZE is incompatible with COMPACT, REDUNDANT, & DYNAMIC
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=1;
-ERROR HY000: Can't create table 'test.t1' (errno: 1478)
+ERROR HY000: Can't create table 'test.t1' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table 'test.t1' (errno: 1478)
+Warning 140 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
+Error 1005 Can't create table 'test.t1' (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2;
-ERROR HY000: Can't create table 'test.t1' (errno: 1478)
+ERROR HY000: Can't create table 'test.t1' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table 'test.t1' (errno: 1478)
+Warning 140 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
+Error 1005 Can't create table 'test.t1' (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4;
-ERROR HY000: Can't create table 'test.t1' (errno: 1478)
+ERROR HY000: Can't create table 'test.t1' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
-Error 1005 Can't create table 'test.t1' (errno: 1478)
+Warning 140 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
+Error 1005 Can't create table 'test.t1' (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
SHOW WARNINGS;
Level Code Message
@@ -104,29 +104,29 @@ t1 Compressed key_block_size=16
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT );
ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=1;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: invalid ROW_FORMAT specifier.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: invalid ROW_FORMAT specifier.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=2;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=8;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=16;
SHOW WARNINGS;
Level Code Message
@@ -146,11 +146,11 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=COMPACT
ALTER TABLE t1 KEY_BLOCK_SIZE=2;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=REDUNDANT;
SHOW WARNINGS;
Level Code Message
@@ -158,11 +158,11 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Redundant row_format=REDUNDANT
ALTER TABLE t1 KEY_BLOCK_SIZE=4;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
SHOW WARNINGS;
Level Code Message
@@ -170,11 +170,11 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Dynamic row_format=DYNAMIC
ALTER TABLE t1 KEY_BLOCK_SIZE=8;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=COMPRESSED;
SHOW WARNINGS;
Level Code Message
@@ -212,23 +212,23 @@ t1 CREATE TABLE `t1` (
`f1` int(11) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=2
ALTER TABLE t1 ROW_FORMAT=COMPACT;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=REDUNDANT;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=COMPRESSED;
SHOW WARNINGS;
Level Code Message
@@ -250,11 +250,11 @@ t1 Compact row_format=COMPACT
# Test 6) StrictMode=ON, CREATE with an invalid KEY_BLOCK_SIZE.
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=9;
-ERROR HY000: Can't create table 'test.t1' (errno: 1478)
+ERROR HY000: Can't create table 'test.t1' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
-Error 1005 Can't create table 'test.t1' (errno: 1478)
+Warning 140 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
+Error 1005 Can't create table 'test.t1' (errno: 140 "Wrong create options")
# Test 7) StrictMode=ON, Make sure ROW_FORMAT= COMPRESSED & DYNAMIC
# and a valid non-zero KEY_BLOCK_SIZE are rejected with Antelope
# and that they can be set to default values during strict mode.
@@ -263,23 +263,23 @@ DROP TABLE IF EXISTS t1;
Warnings:
Note 1051 Unknown table 't1'
CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=4;
-ERROR HY000: Can't create table 'test.t1' (errno: 1478)
+ERROR HY000: Can't create table 'test.t1' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Error 1005 Can't create table 'test.t1' (errno: 1478)
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Error 1005 Can't create table 'test.t1' (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED;
-ERROR HY000: Can't create table 'test.t1' (errno: 1478)
+ERROR HY000: Can't create table 'test.t1' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
-Error 1005 Can't create table 'test.t1' (errno: 1478)
+Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
+Error 1005 Can't create table 'test.t1' (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC;
-ERROR HY000: Can't create table 'test.t1' (errno: 1478)
+ERROR HY000: Can't create table 'test.t1' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
-Error 1005 Can't create table 'test.t1' (errno: 1478)
+Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
+Error 1005 Can't create table 'test.t1' (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT;
SHOW WARNINGS;
Level Code Message
@@ -298,34 +298,34 @@ CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT;
SHOW WARNINGS;
Level Code Message
ALTER TABLE t1 KEY_BLOCK_SIZE=8;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=COMPRESSED;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SET GLOBAL innodb_file_format=Barracuda;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
SET GLOBAL innodb_file_format=Antelope;
ALTER TABLE t1 ADD COLUMN f1 INT;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0;
SHOW WARNINGS;
Level Code Message
@@ -340,23 +340,23 @@ SET GLOBAL innodb_file_format=Barracuda;
SET GLOBAL innodb_file_per_table=OFF;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=16;
-ERROR HY000: Can't create table 'test.t1' (errno: 1478)
+ERROR HY000: Can't create table 'test.t1' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Error 1005 Can't create table 'test.t1' (errno: 1478)
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Error 1005 Can't create table 'test.t1' (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED;
-ERROR HY000: Can't create table 'test.t1' (errno: 1478)
+ERROR HY000: Can't create table 'test.t1' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
-Error 1005 Can't create table 'test.t1' (errno: 1478)
+Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
+Error 1005 Can't create table 'test.t1' (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC;
-ERROR HY000: Can't create table 'test.t1' (errno: 1478)
+ERROR HY000: Can't create table 'test.t1' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
-Error 1005 Can't create table 'test.t1' (errno: 1478)
+Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
+Error 1005 Can't create table 'test.t1' (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT;
SHOW WARNINGS;
Level Code Message
@@ -375,23 +375,23 @@ CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT;
SHOW WARNINGS;
Level Code Message
ALTER TABLE t1 KEY_BLOCK_SIZE=1;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=COMPRESSED;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=COMPACT;
SHOW WARNINGS;
Level Code Message
@@ -415,12 +415,12 @@ DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
SET GLOBAL innodb_file_per_table=OFF;
ALTER TABLE t1 ADD COLUMN f1 INT;
-ERROR HY000: Can't create table '#sql-temporary' (errno: 1478)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
-Error 1005 Can't create table '#sql-temporary' (errno: 1478)
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
+Error 1005 Can't create table '#sql-temporary' (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0;
SHOW WARNINGS;
Level Code Message
@@ -437,10 +437,10 @@ SET SESSION innodb_strict_mode = OFF;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=FIXED;
Warnings:
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=FIXED
@@ -477,10 +477,10 @@ TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact
ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=0;
Warnings:
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=FIXED
@@ -489,30 +489,30 @@ t1 Compact row_format=FIXED
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=1;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=COMPACT key_block_size=1
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Redundant row_format=REDUNDANT key_block_size=2
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Dynamic row_format=DYNAMIC key_block_size=4
@@ -547,12 +547,12 @@ DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT );
ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=1;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=FIXED key_block_size=1
@@ -560,10 +560,10 @@ DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT );
ALTER TABLE t1 ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=2;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=COMPACT key_block_size=2
@@ -571,10 +571,10 @@ DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT );
ALTER TABLE t1 ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Dynamic row_format=DYNAMIC key_block_size=4
@@ -582,10 +582,10 @@ DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT );
ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=8;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=8 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=8 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=8 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=8 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Redundant row_format=REDUNDANT key_block_size=8
@@ -611,28 +611,28 @@ TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=COMPACT
ALTER TABLE t1 KEY_BLOCK_SIZE=2;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=COMPACT key_block_size=2
ALTER TABLE t1 ROW_FORMAT=REDUNDANT;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Redundant row_format=REDUNDANT key_block_size=2
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Dynamic row_format=DYNAMIC key_block_size=2
@@ -678,28 +678,28 @@ t1 CREATE TABLE `t1` (
) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=16
ALTER TABLE t1 ROW_FORMAT=COMPACT;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=COMPACT key_block_size=16
ALTER TABLE t1 ROW_FORMAT=REDUNDANT;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Redundant row_format=REDUNDANT key_block_size=16
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Dynamic row_format=DYNAMIC key_block_size=16
@@ -725,10 +725,10 @@ t1 Compact row_format=COMPACT
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=15;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=15.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=15.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=15.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=15.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact key_block_size=15
@@ -745,16 +745,16 @@ t1 Compressed row_format=COMPRESSED key_block_size=1
SET GLOBAL innodb_file_format=Antelope;
ALTER TABLE t1 ADD COLUMN f1 INT;
Warnings:
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1.
-Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1.
+Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1.
-Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1.
+Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=COMPRESSED key_block_size=1
@@ -775,12 +775,12 @@ t1 Dynamic row_format=DYNAMIC
SET GLOBAL innodb_file_format=Antelope;
ALTER TABLE t1 ADD COLUMN f1 INT;
Warnings:
-Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=DYNAMIC
@@ -804,16 +804,16 @@ t1 Compressed row_format=COMPRESSED key_block_size=2
SET GLOBAL innodb_file_per_table=OFF;
ALTER TABLE t1 ADD COLUMN f1 INT;
Warnings:
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2.
-Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2.
+Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2.
-Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2.
+Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=COMPRESSED key_block_size=2
@@ -834,12 +834,12 @@ t1 Dynamic row_format=DYNAMIC
SET GLOBAL innodb_file_per_table=OFF;
ALTER TABLE t1 ADD COLUMN f1 INT;
Warnings:
-Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SHOW WARNINGS;
Level Code Message
-Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=DYNAMIC
diff --git a/mysql-test/suite/innodb/r/innodb-index.result b/mysql-test/suite/innodb/r/innodb-index.result
index 48390b14a82..0f004772153 100644
--- a/mysql-test/suite/innodb/r/innodb-index.result
+++ b/mysql-test/suite/innodb/r/innodb-index.result
@@ -545,7 +545,7 @@ t4 CREATE TABLE `t4` (
CONSTRAINT `dc` FOREIGN KEY (`a`) REFERENCES `t1` (`a`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
alter table t3 add constraint dc foreign key (a) references t1(a);
-ERROR HY000: Can't create table '#sql-temporary' (errno: 121)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 121 "Duplicate key on write or update")
show create table t3;
Table Create Table
t3 CREATE TABLE `t3` (
@@ -578,7 +578,7 @@ ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fail
drop index dc on t4;
ERROR 42000: Can't DROP 'dc'; check that column/key exists
alter table t3 drop foreign key dc;
-ERROR HY000: Error on rename of './test/t3' to '#sql2-temporary' (errno: 152)
+ERROR HY000: Error on rename of './test/t3' to '#sql2-temporary' (errno: 152 "Cannot delete a parent row")
alter table t4 drop foreign key dc;
select * from t2;
a b c d e
@@ -1146,17 +1146,17 @@ PRIMARY KEY (c1)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
FOREIGN KEY (c3,c2) REFERENCES t1(c1,c1);
-ERROR HY000: Can't create table '#sql-temporary' (errno: 150)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 150 "Foreign key constraint is incorrectly formed")
ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
FOREIGN KEY (c3,c2) REFERENCES t1(c1,c2);
-ERROR HY000: Can't create table '#sql-temporary' (errno: 150)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 150 "Foreign key constraint is incorrectly formed")
ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
FOREIGN KEY (c3,c2) REFERENCES t1(c2,c1);
-ERROR HY000: Can't create table '#sql-temporary' (errno: 150)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 150 "Foreign key constraint is incorrectly formed")
ALTER TABLE t1 MODIFY COLUMN c2 BIGINT(12) NOT NULL;
ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
FOREIGN KEY (c3,c2) REFERENCES t1(c1,c2);
-ERROR HY000: Can't create table '#sql-temporary' (errno: 150)
+ERROR HY000: Can't create table '#sql-temporary' (errno: 150 "Foreign key constraint is incorrectly formed")
ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
FOREIGN KEY (c3,c2) REFERENCES t1(c2,c1);
SHOW CREATE TABLE t1;
diff --git a/mysql-test/suite/innodb/r/innodb-zip.result b/mysql-test/suite/innodb/r/innodb-zip.result
index db7fd6d812c..f794ae7470f 100644
--- a/mysql-test/suite/innodb/r/innodb-zip.result
+++ b/mysql-test/suite/innodb/r/innodb-zip.result
@@ -3,57 +3,57 @@ set global innodb_file_per_table=off;
set global innodb_file_format=`0`;
create table t0(a int primary key) engine=innodb row_format=compressed;
Warnings:
-Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
create table t00(a int primary key) engine=innodb
key_block_size=4 row_format=compressed;
Warnings:
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4.
-Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4.
+Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
create table t1(a int primary key) engine=innodb row_format=dynamic;
Warnings:
-Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
create table t2(a int primary key) engine=innodb row_format=redundant;
create table t3(a int primary key) engine=innodb row_format=compact;
create table t4(a int primary key) engine=innodb key_block_size=9;
Warnings:
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=9.
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=9.
create table t5(a int primary key) engine=innodb
key_block_size=1 row_format=redundant;
Warnings:
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1.
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1.
set global innodb_file_per_table=on;
create table t6(a int primary key) engine=innodb
key_block_size=1 row_format=redundant;
Warnings:
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1.
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1.
set global innodb_file_format=`1`;
create table t7(a int primary key) engine=innodb
key_block_size=1 row_format=redundant;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
create table t8(a int primary key) engine=innodb
key_block_size=1 row_format=fixed;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
create table t9(a int primary key) engine=innodb
key_block_size=1 row_format=compact;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
create table t10(a int primary key) engine=innodb
key_block_size=1 row_format=dynamic;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
create table t11(a int primary key) engine=innodb
key_block_size=1 row_format=compressed;
create table t12(a int primary key) engine=innodb
@@ -62,7 +62,7 @@ create table t13(a int primary key) engine=innodb
row_format=compressed;
create table t14(a int primary key) engine=innodb key_block_size=9;
Warnings:
-Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=9.
+Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=9.
SELECT table_schema, table_name, row_format, data_length, index_length
FROM information_schema.tables WHERE engine='innodb';
table_schema table_name row_format data_length index_length
@@ -193,11 +193,11 @@ drop table t1;
set innodb_strict_mode = on;
create table t1 (id int primary key) engine = innodb key_block_size = 0;
create table t2 (id int primary key) engine = innodb key_block_size = 9;
-ERROR HY000: Can't create table 'test.t2' (errno: 1478)
+ERROR HY000: Can't create table 'test.t2' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
-Error 1005 Can't create table 'test.t2' (errno: 1478)
+Warning 140 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
+Error 1005 Can't create table 'test.t2' (errno: 140 "Wrong create options")
create table t3 (id int primary key) engine = innodb key_block_size = 1;
create table t4 (id int primary key) engine = innodb key_block_size = 2;
create table t5 (id int primary key) engine = innodb key_block_size = 4;
@@ -225,25 +225,25 @@ create table t1 (id int primary key) engine = innodb
key_block_size = 8 row_format = compressed;
create table t2 (id int primary key) engine = innodb
key_block_size = 8 row_format = redundant;
-ERROR HY000: Can't create table 'test.t2' (errno: 1478)
+ERROR HY000: Can't create table 'test.t2' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table 'test.t2' (errno: 1478)
+Warning 140 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
+Error 1005 Can't create table 'test.t2' (errno: 140 "Wrong create options")
create table t3 (id int primary key) engine = innodb
key_block_size = 8 row_format = compact;
-ERROR HY000: Can't create table 'test.t3' (errno: 1478)
+ERROR HY000: Can't create table 'test.t3' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table 'test.t3' (errno: 1478)
+Warning 140 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
+Error 1005 Can't create table 'test.t3' (errno: 140 "Wrong create options")
create table t4 (id int primary key) engine = innodb
key_block_size = 8 row_format = dynamic;
-ERROR HY000: Can't create table 'test.t4' (errno: 1478)
+ERROR HY000: Can't create table 'test.t4' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
-Error 1005 Can't create table 'test.t4' (errno: 1478)
+Warning 140 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
+Error 1005 Can't create table 'test.t4' (errno: 140 "Wrong create options")
create table t5 (id int primary key) engine = innodb
key_block_size = 8 row_format = default;
SELECT table_schema, table_name, row_format, data_length, index_length
@@ -254,74 +254,74 @@ test t5 Compressed 8192 0
drop table t1, t5;
create table t1 (id int primary key) engine = innodb
key_block_size = 9 row_format = redundant;
-ERROR HY000: Can't create table 'test.t1' (errno: 1478)
+ERROR HY000: Can't create table 'test.t1' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table 'test.t1' (errno: 1478)
+Warning 140 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
+Warning 140 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
+Error 1005 Can't create table 'test.t1' (errno: 140 "Wrong create options")
create table t2 (id int primary key) engine = innodb
key_block_size = 9 row_format = compact;
-ERROR HY000: Can't create table 'test.t2' (errno: 1478)
+ERROR HY000: Can't create table 'test.t2' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table 'test.t2' (errno: 1478)
+Warning 140 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
+Warning 140 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
+Error 1005 Can't create table 'test.t2' (errno: 140 "Wrong create options")
create table t2 (id int primary key) engine = innodb
key_block_size = 9 row_format = dynamic;
-ERROR HY000: Can't create table 'test.t2' (errno: 1478)
+ERROR HY000: Can't create table 'test.t2' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
-Warning 1478 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
-Error 1005 Can't create table 'test.t2' (errno: 1478)
+Warning 140 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
+Warning 140 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
+Error 1005 Can't create table 'test.t2' (errno: 140 "Wrong create options")
SELECT table_schema, table_name, row_format, data_length, index_length
FROM information_schema.tables WHERE engine='innodb';
table_schema table_name row_format data_length index_length
set global innodb_file_per_table = off;
create table t1 (id int primary key) engine = innodb key_block_size = 1;
-ERROR HY000: Can't create table 'test.t1' (errno: 1478)
+ERROR HY000: Can't create table 'test.t1' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Error 1005 Can't create table 'test.t1' (errno: 1478)
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Error 1005 Can't create table 'test.t1' (errno: 140 "Wrong create options")
create table t2 (id int primary key) engine = innodb key_block_size = 2;
-ERROR HY000: Can't create table 'test.t2' (errno: 1478)
+ERROR HY000: Can't create table 'test.t2' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Error 1005 Can't create table 'test.t2' (errno: 1478)
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Error 1005 Can't create table 'test.t2' (errno: 140 "Wrong create options")
create table t3 (id int primary key) engine = innodb key_block_size = 4;
-ERROR HY000: Can't create table 'test.t3' (errno: 1478)
+ERROR HY000: Can't create table 'test.t3' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Error 1005 Can't create table 'test.t3' (errno: 1478)
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Error 1005 Can't create table 'test.t3' (errno: 140 "Wrong create options")
create table t4 (id int primary key) engine = innodb key_block_size = 8;
-ERROR HY000: Can't create table 'test.t4' (errno: 1478)
+ERROR HY000: Can't create table 'test.t4' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Error 1005 Can't create table 'test.t4' (errno: 1478)
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Error 1005 Can't create table 'test.t4' (errno: 140 "Wrong create options")
create table t5 (id int primary key) engine = innodb key_block_size = 16;
-ERROR HY000: Can't create table 'test.t5' (errno: 1478)
+ERROR HY000: Can't create table 'test.t5' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Error 1005 Can't create table 'test.t5' (errno: 1478)
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Error 1005 Can't create table 'test.t5' (errno: 140 "Wrong create options")
create table t6 (id int primary key) engine = innodb row_format = compressed;
-ERROR HY000: Can't create table 'test.t6' (errno: 1478)
+ERROR HY000: Can't create table 'test.t6' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
-Error 1005 Can't create table 'test.t6' (errno: 1478)
+Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
+Error 1005 Can't create table 'test.t6' (errno: 140 "Wrong create options")
create table t7 (id int primary key) engine = innodb row_format = dynamic;
-ERROR HY000: Can't create table 'test.t7' (errno: 1478)
+ERROR HY000: Can't create table 'test.t7' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
-Error 1005 Can't create table 'test.t7' (errno: 1478)
+Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
+Error 1005 Can't create table 'test.t7' (errno: 140 "Wrong create options")
create table t8 (id int primary key) engine = innodb row_format = compact;
create table t9 (id int primary key) engine = innodb row_format = redundant;
SELECT table_schema, table_name, row_format, data_length, index_length
@@ -333,47 +333,47 @@ drop table t8, t9;
set global innodb_file_per_table = on;
set global innodb_file_format = `0`;
create table t1 (id int primary key) engine = innodb key_block_size = 1;
-ERROR HY000: Can't create table 'test.t1' (errno: 1478)
+ERROR HY000: Can't create table 'test.t1' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Error 1005 Can't create table 'test.t1' (errno: 1478)
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Error 1005 Can't create table 'test.t1' (errno: 140 "Wrong create options")
create table t2 (id int primary key) engine = innodb key_block_size = 2;
-ERROR HY000: Can't create table 'test.t2' (errno: 1478)
+ERROR HY000: Can't create table 'test.t2' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Error 1005 Can't create table 'test.t2' (errno: 1478)
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Error 1005 Can't create table 'test.t2' (errno: 140 "Wrong create options")
create table t3 (id int primary key) engine = innodb key_block_size = 4;
-ERROR HY000: Can't create table 'test.t3' (errno: 1478)
+ERROR HY000: Can't create table 'test.t3' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Error 1005 Can't create table 'test.t3' (errno: 1478)
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Error 1005 Can't create table 'test.t3' (errno: 140 "Wrong create options")
create table t4 (id int primary key) engine = innodb key_block_size = 8;
-ERROR HY000: Can't create table 'test.t4' (errno: 1478)
+ERROR HY000: Can't create table 'test.t4' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Error 1005 Can't create table 'test.t4' (errno: 1478)
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Error 1005 Can't create table 'test.t4' (errno: 140 "Wrong create options")
create table t5 (id int primary key) engine = innodb key_block_size = 16;
-ERROR HY000: Can't create table 'test.t5' (errno: 1478)
+ERROR HY000: Can't create table 'test.t5' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Error 1005 Can't create table 'test.t5' (errno: 1478)
+Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Error 1005 Can't create table 'test.t5' (errno: 140 "Wrong create options")
create table t6 (id int primary key) engine = innodb row_format = compressed;
-ERROR HY000: Can't create table 'test.t6' (errno: 1478)
+ERROR HY000: Can't create table 'test.t6' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
-Error 1005 Can't create table 'test.t6' (errno: 1478)
+Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
+Error 1005 Can't create table 'test.t6' (errno: 140 "Wrong create options")
create table t7 (id int primary key) engine = innodb row_format = dynamic;
-ERROR HY000: Can't create table 'test.t7' (errno: 1478)
+ERROR HY000: Can't create table 'test.t7' (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
-Error 1005 Can't create table 'test.t7' (errno: 1478)
+Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
+Error 1005 Can't create table 'test.t7' (errno: 140 "Wrong create options")
create table t8 (id int primary key) engine = innodb row_format = compact;
create table t9 (id int primary key) engine = innodb row_format = redundant;
SELECT table_schema, table_name, row_format, data_length, index_length
diff --git a/mysql-test/suite/innodb/r/innodb.result b/mysql-test/suite/innodb/r/innodb.result
index a7db250d8c7..7307ce9dc24 100644
--- a/mysql-test/suite/innodb/r/innodb.result
+++ b/mysql-test/suite/innodb/r/innodb.result
@@ -2261,7 +2261,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (v varchar(10), c char(10)) row_format=fixed;
Warnings:
-Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -2525,13 +2525,13 @@ drop table t1;
set foreign_key_checks=0;
create table t2 (a int primary key, b int, foreign key (b) references t1(a)) engine = innodb;
create table t1(a char(10) primary key, b varchar(20)) engine = innodb;
-ERROR HY000: Can't create table 'test.t1' (errno: 150)
+ERROR HY000: Can't create table 'test.t1' (errno: 150 "Foreign key constraint is incorrectly formed")
set foreign_key_checks=1;
drop table t2;
set foreign_key_checks=0;
create table t1(a varchar(10) primary key) engine = innodb DEFAULT CHARSET=latin1;
create table t2 (a varchar(10), foreign key (a) references t1(a)) engine = innodb DEFAULT CHARSET=utf8;
-ERROR HY000: Can't create table 'test.t2' (errno: 150)
+ERROR HY000: Can't create table 'test.t2' (errno: 150 "Foreign key constraint is incorrectly formed")
set foreign_key_checks=1;
drop table t1;
set foreign_key_checks=0;
@@ -2551,7 +2551,7 @@ set foreign_key_checks=0;
create table t2 (a varchar(10), foreign key (a) references t1(a)) engine = innodb DEFAULT CHARSET=latin1;
create table t3(a varchar(10) primary key) engine = innodb DEFAULT CHARSET=utf8;
rename table t3 to t1;
-ERROR HY000: Error on rename of './test/t3' to './test/t1' (errno: 150)
+ERROR HY000: Error on rename of './test/t3' to './test/t1' (errno: 150 "Foreign key constraint is incorrectly formed")
set foreign_key_checks=1;
drop table t2,t3;
create table t1(a int primary key) row_format=redundant engine=innodb;
@@ -2978,7 +2978,7 @@ INSERT INTO t1 VALUES (1);
INSERT INTO t2 VALUES (1);
ALTER TABLE t2 ADD FOREIGN KEY (a) REFERENCES t1 (a) ON DELETE SET NULL;
ALTER TABLE t2 MODIFY a INT NOT NULL;
-ERROR HY000: Error on rename of '#sql-temporary' to './test/t2' (errno: 150)
+ERROR HY000: Error on rename of '#sql-temporary' to './test/t2' (errno: 150 "Foreign key constraint is incorrectly formed")
DELETE FROM t1;
DROP TABLE t2,t1;
CREATE TABLE t1 (a VARCHAR(5) COLLATE utf8_unicode_ci PRIMARY KEY)
diff --git a/mysql-test/suite/innodb/r/innodb_bug12902967.result b/mysql-test/suite/innodb/r/innodb_bug12902967.result
index da1d94d69f5..5958a8dce31 100644
--- a/mysql-test/suite/innodb/r/innodb_bug12902967.result
+++ b/mysql-test/suite/innodb/r/innodb_bug12902967.result
@@ -1,6 +1,6 @@
create table t1 (f1 integer primary key) engine innodb;
alter table t1 add constraint c1 foreign key (f1) references t1(f1);
-ERROR HY000: Error on rename of '#sql-temporary' to './test/t1' (errno: 150)
+ERROR HY000: Error on rename of '#sql-temporary' to './test/t1' (errno: 150 "Foreign key constraint is incorrectly formed")
InnoDB: has or is referenced in foreign key constraints
InnoDB: which are not compatible with the new table definition.
drop table t1;
diff --git a/mysql-test/suite/innodb/r/innodb_bug13635833.result b/mysql-test/suite/innodb/r/innodb_bug13635833.result
index 6a505bc94f2..a92b57cc824 100644
--- a/mysql-test/suite/innodb/r/innodb_bug13635833.result
+++ b/mysql-test/suite/innodb/r/innodb_bug13635833.result
@@ -37,7 +37,7 @@ SET DEBUG_SYNC='innodb_row_update_for_mysql_begin
SET DEBUG_SYNC='innodb_dml_cascade_dict_unfreeze SIGNAL dict_unfreeze
WAIT_FOR foreign_free_cache';
update ignore t1 set f1 = 20;
-ERROR HY000: Error on rename of './test/t2' to '#sql2-temporary' (errno: 181)
+ERROR HY000: Error on rename of './test/t2' to '#sql2-temporary' (errno: 182 "Table is being used in foreign key check")
SET DEBUG_SYNC='now SIGNAL foreign_free_cache';
drop table t2;
drop table t1;
diff --git a/mysql-test/suite/innodb/r/innodb_bug21704.result b/mysql-test/suite/innodb/r/innodb_bug21704.result
index ffbfa8a337e..3a6b38d50da 100644
--- a/mysql-test/suite/innodb/r/innodb_bug21704.result
+++ b/mysql-test/suite/innodb/r/innodb_bug21704.result
@@ -22,7 +22,7 @@ INSERT INTO t3 VALUES (1,1,1),(2,2,2),(3,3,3);
# Test renaming the column in the referenced table.
ALTER TABLE t1 CHANGE a c INT;
-ERROR HY000: Error on rename of '#sql-temporary' to './test/t1' (errno: 150)
+ERROR HY000: Error on rename of '#sql-temporary' to './test/t1' (errno: 150 "Foreign key constraint is incorrectly formed")
# Ensure that online column rename works.
ALTER TABLE t1 CHANGE b c INT;
affected rows: 3
@@ -31,7 +31,7 @@ info: Records: 3 Duplicates: 0 Warnings: 0
# Test renaming the column in the referencing table
ALTER TABLE t2 CHANGE a c INT;
-ERROR HY000: Error on rename of '#sql-temporary' to './test/t2' (errno: 150)
+ERROR HY000: Error on rename of '#sql-temporary' to './test/t2' (errno: 150 "Foreign key constraint is incorrectly formed")
# Ensure that online column rename works.
ALTER TABLE t2 CHANGE b c INT;
affected rows: 3
@@ -40,9 +40,9 @@ info: Records: 3 Duplicates: 0 Warnings: 0
# Test with self-referential constraints
ALTER TABLE t3 CHANGE a d INT;
-ERROR HY000: Error on rename of '#sql-temporary' to './test/t3' (errno: 150)
+ERROR HY000: Error on rename of '#sql-temporary' to './test/t3' (errno: 150 "Foreign key constraint is incorrectly formed")
ALTER TABLE t3 CHANGE b d INT;
-ERROR HY000: Error on rename of '#sql-temporary' to './test/t3' (errno: 150)
+ERROR HY000: Error on rename of '#sql-temporary' to './test/t3' (errno: 150 "Foreign key constraint is incorrectly formed")
# Ensure that online column rename works.
ALTER TABLE t3 CHANGE c d INT;
affected rows: 3
diff --git a/mysql-test/suite/innodb/r/innodb_bug46000.result b/mysql-test/suite/innodb/r/innodb_bug46000.result
index b27b3d7443b..9ee0c935d71 100644
--- a/mysql-test/suite/innodb/r/innodb_bug46000.result
+++ b/mysql-test/suite/innodb/r/innodb_bug46000.result
@@ -6,7 +6,7 @@ show warnings;
Level Code Message
Warning 1280 Cannot Create Index with name 'GEN_CLUST_INDEX'. The name is reserved for the system default primary index.
Error 1280 Incorrect index name 'GEN_CLUST_INDEX'
-Error 1005 Can't create table 'test.bug46000' (errno: -1)
+Error 1005 Can't create table 'test.bug46000' (errno: -1 "Internal error < 0 (Not system error)")
create table bug46000(id int) engine=innodb;
create index GEN_CLUST_INDEX on bug46000(id);
ERROR 42000: Incorrect index name 'GEN_CLUST_INDEX'
diff --git a/mysql-test/suite/innodb/r/innodb_corrupt_bit.result b/mysql-test/suite/innodb/r/innodb_corrupt_bit.result
index 13b1e24a9ed..e194f67f5c5 100644
--- a/mysql-test/suite/innodb/r/innodb_corrupt_bit.result
+++ b/mysql-test/suite/innodb/r/innodb_corrupt_bit.result
@@ -45,8 +45,8 @@ select z from corrupt_bit_test_ā;
ERROR HY000: Index corrupt_bit_test_ā is corrupted
show warnings;
Level Code Message
-Warning 179 InnoDB: Index "idxē" for table "test"."corrupt_bit_test_ā" is marked as corrupted
-Warning 179 Got error 179 when reading table `test`.`corrupt_bit_test_ā`
+Warning 180 InnoDB: Index "idxē" for table "test"."corrupt_bit_test_ā" is marked as corrupted
+Warning 180 Got error 180 when reading table `test`.`corrupt_bit_test_ā`
Error 1712 Index corrupt_bit_test_ā is corrupted
insert into corrupt_bit_test_ā values (10001, "a", 20001, 20001);
select * from corrupt_bit_test_ā use index(primary) where a = 10001;
diff --git a/mysql-test/suite/innodb/t/binlog_consistent.test b/mysql-test/suite/innodb/t/binlog_consistent.test
index f4babb8bad7..20023871093 100644
--- a/mysql-test/suite/innodb/t/binlog_consistent.test
+++ b/mysql-test/suite/innodb/t/binlog_consistent.test
@@ -72,6 +72,7 @@ connection con3;
--echo # Connection con3
COMMIT;
FLUSH LOGS;
+--source include/wait_for_binlog_checkpoint.inc
connection default;
--echo # Connection default
diff --git a/mysql-test/suite/innodb/t/group_commit_binlog_pos.test b/mysql-test/suite/innodb/t/group_commit_binlog_pos.test
index 72798a68a1e..213dbc9d3d8 100644
--- a/mysql-test/suite/innodb/t/group_commit_binlog_pos.test
+++ b/mysql-test/suite/innodb/t/group_commit_binlog_pos.test
@@ -17,6 +17,19 @@
# Test that we get the correct position when we group commit several
# transactions together.
+# What we really want to test here is what happens when a group of
+# transactions gets written only partially to disk inside InnoDB before
+# the crash. But that is hard to test in mysql-test-run automated
+# tests. Instead, we use debug_sync to tightly control when each
+# transaction is written to the redo log. And we set
+# innodb_flush_log_at_trx_commit=3 so that we can write out
+# transactions individually; with innodb_flush_log_at_trx_commit=1,
+# all commits are written together, as part of a commit_checkpoint.
+# (Note that we do not have to restore innodb_flush_log_at_trx_commit, as
+# we crash the server.)
+SET GLOBAL innodb_flush_log_at_trx_commit=3;
+
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=innodb;
INSERT INTO t1 VALUES (0);
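# A minimal sketch, in mysqltest style, of the debug_sync technique the
# comment above describes. It is an assumption-level illustration only:
# the sync point name "commit_before_redo_write" is hypothetical, and a
# real test must use a sync point that exists in the server source.
#   SET GLOBAL innodb_flush_log_at_trx_commit=3;
#   connect (con1,localhost,root,,);
#   SET DEBUG_SYNC='commit_before_redo_write SIGNAL reached WAIT_FOR go';
#   --send INSERT INTO t1 VALUES (1)
#   connection default;
#   SET DEBUG_SYNC='now WAIT_FOR reached';
#   # Crash and restart the server here; on recovery InnoDB should report
#   # the binlog position of the last transaction written to the redo log.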
diff --git a/mysql-test/suite/innodb/t/group_commit_binlog_pos_no_optimize_thread.test b/mysql-test/suite/innodb/t/group_commit_binlog_pos_no_optimize_thread.test
index e9a234577e2..3ae3c50085d 100644
--- a/mysql-test/suite/innodb/t/group_commit_binlog_pos_no_optimize_thread.test
+++ b/mysql-test/suite/innodb/t/group_commit_binlog_pos_no_optimize_thread.test
@@ -17,6 +17,19 @@
# Test that we get the correct position when we group commit several
# transactions together.
+# What we really want to test here is what happens when a group of
+# transactions gets written only partially to disk inside InnoDB before
+# the crash. But that is hard to test in mysql-test-run automated
+# tests. Instead, we use debug_sync to tightly control when each
+# transaction is written to the redo log. And we set
+# innodb_flush_log_at_trx_commit=3 so that we can write out
+# transactions individually; with innodb_flush_log_at_trx_commit=1,
+# all commits are written together, as part of a commit_checkpoint.
+# (Note that we do not have to restore innodb_flush_log_at_trx_commit, as
+# we crash the server.)
+SET GLOBAL innodb_flush_log_at_trx_commit=3;
+
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=innodb;
INSERT INTO t1 VALUES (0);
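# A short hedged illustration of the =3 versus =1 contrast drawn in the
# comment above (semantics as stated there, not independently verified):
#   SET GLOBAL innodb_flush_log_at_trx_commit=3;
#   INSERT INTO t1 VALUES (1);  # this commit's redo is written on its own
#   SET GLOBAL innodb_flush_log_at_trx_commit=1;
#   INSERT INTO t1 VALUES (2);  # commits written together at the
#                               # commit_checkpoint, per the comment above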
diff --git a/mysql-test/suite/innodb/t/group_commit_crash.test b/mysql-test/suite/innodb/t/group_commit_crash.test
index 12c92d19244..0a297e3c570 100644
--- a/mysql-test/suite/innodb/t/group_commit_crash.test
+++ b/mysql-test/suite/innodb/t/group_commit_crash.test
@@ -66,7 +66,7 @@ while ($numtests)
# table and binlog should be in sync.
SELECT * FROM t1 ORDER BY id;
--replace_column 2 # 5 #
- SHOW BINLOG EVENTS LIMIT 2,1;
+ SHOW BINLOG EVENTS LIMIT 3,1;
delete from t1;
diff --git a/mysql-test/suite/innodb/t/group_commit_crash_no_optimize_thread.test b/mysql-test/suite/innodb/t/group_commit_crash_no_optimize_thread.test
index 2de09d6b0b6..4214c2a6b03 100644
--- a/mysql-test/suite/innodb/t/group_commit_crash_no_optimize_thread.test
+++ b/mysql-test/suite/innodb/t/group_commit_crash_no_optimize_thread.test
@@ -66,7 +66,7 @@ while ($numtests)
# table and binlog should be in sync.
SELECT * FROM t1 ORDER BY id;
--replace_column 2 # 5 #
- SHOW BINLOG EVENTS LIMIT 2,1;
+ SHOW BINLOG EVENTS LIMIT 3,1;
delete from t1;
diff --git a/mysql-test/suite/innodb/t/innodb-create-options.test b/mysql-test/suite/innodb/t/innodb-create-options.test
index 3215002a37c..12c24820031 100644
--- a/mysql-test/suite/innodb/t/innodb-create-options.test
+++ b/mysql-test/suite/innodb/t/innodb-create-options.test
@@ -76,8 +76,10 @@ SET SESSION innodb_strict_mode = ON;
DROP TABLE IF EXISTS t1;
--echo # 'FIXED' is sent to InnoDB since it is used by MyISAM.
--echo # But it is an invalid mode in InnoDB
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) ROW_FORMAT=FIXED;
+--replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=0;
SHOW WARNINGS;
@@ -94,10 +96,10 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=0;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
@@ -106,14 +108,20 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
--echo # Test 2) StrictMode=ON, CREATE with each ROW_FORMAT & a valid non-zero KEY_BLOCK_SIZE
--echo # KEY_BLOCK_SIZE is incompatible with COMPACT, REDUNDANT, & DYNAMIC
DROP TABLE IF EXISTS t1;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=1;
+--replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2;
+--replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4;
+--replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
SHOW WARNINGS;
@@ -133,25 +141,25 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
--echo # Test 3) StrictMode=ON, ALTER with each ROW_FORMAT & a valid non-zero KEY_BLOCK_SIZE
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT );
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=1;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=2;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=8;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=16;
SHOW WARNINGS;
@@ -165,26 +173,26 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 KEY_BLOCK_SIZE=2;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
ALTER TABLE t1 ROW_FORMAT=REDUNDANT;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 KEY_BLOCK_SIZE=4;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 KEY_BLOCK_SIZE=8;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
ALTER TABLE t1 ROW_FORMAT=COMPRESSED;
SHOW WARNINGS;
@@ -205,20 +213,20 @@ CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=2;
SHOW CREATE TABLE t1;
ALTER TABLE t1 ADD COLUMN f1 INT;
SHOW CREATE TABLE t1;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 ROW_FORMAT=COMPACT;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 ROW_FORMAT=REDUNDANT;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
ALTER TABLE t1 ROW_FORMAT=COMPRESSED;
SHOW WARNINGS;
@@ -232,8 +240,10 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
--echo # Test 6) StrictMode=ON, CREATE with an invalid KEY_BLOCK_SIZE.
DROP TABLE IF EXISTS t1;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=9;
+--replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
--echo # Test 7) StrictMode=ON, Make sure ROW_FORMAT= COMPRESSED & DYNAMIC and
@@ -241,14 +251,20 @@ SHOW WARNINGS;
--echo # and that they can be set to default values during strict mode.
SET GLOBAL innodb_file_format=Antelope;
DROP TABLE IF EXISTS t1;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=4;
+--replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED;
+--replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC;
+--replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT;
SHOW WARNINGS;
@@ -260,29 +276,29 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT;
SHOW WARNINGS;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 KEY_BLOCK_SIZE=8;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 ROW_FORMAT=COMPRESSED;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
SET GLOBAL innodb_file_format=Barracuda;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
SET GLOBAL innodb_file_format=Antelope;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 ADD COLUMN f1 INT;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0;
SHOW WARNINGS;
@@ -296,14 +312,20 @@ SET GLOBAL innodb_file_format=Barracuda;
--echo # values during strict mode.
SET GLOBAL innodb_file_per_table=OFF;
DROP TABLE IF EXISTS t1;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=16;
+--replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED;
+--replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC;
+--replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT;
SHOW WARNINGS;
@@ -315,20 +337,20 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT;
SHOW WARNINGS;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 KEY_BLOCK_SIZE=1;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 ROW_FORMAT=COMPRESSED;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
ALTER TABLE t1 ROW_FORMAT=COMPACT;
SHOW WARNINGS;
@@ -343,10 +365,10 @@ SET GLOBAL innodb_file_per_table=ON;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
SET GLOBAL innodb_file_per_table=OFF;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
ALTER TABLE t1 ADD COLUMN f1 INT;
---replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/
+--replace_regex /'[^']*test\.#sql-[0-9a-f_]*'/'#sql-temporary'/ / - .*[0-9]*)/)/
SHOW WARNINGS;
ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0;
SHOW WARNINGS;
diff --git a/mysql-test/suite/innodb/t/innodb-zip.test b/mysql-test/suite/innodb/t/innodb-zip.test
index 3db4de3a13b..7e2e2d1d3c9 100644
--- a/mysql-test/suite/innodb/t/innodb-zip.test
+++ b/mysql-test/suite/innodb/t/innodb-zip.test
@@ -182,8 +182,10 @@ set innodb_strict_mode = on;
create table t1 (id int primary key) engine = innodb key_block_size = 0;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t2 (id int primary key) engine = innodb key_block_size = 9;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
@@ -207,19 +209,25 @@ drop table t1, t3, t4, t5, t6, t7, t8, t9, t10, t11;
create table t1 (id int primary key) engine = innodb
key_block_size = 8 row_format = compressed;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t2 (id int primary key) engine = innodb
key_block_size = 8 row_format = redundant;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t3 (id int primary key) engine = innodb
key_block_size = 8 row_format = compact;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t4 (id int primary key) engine = innodb
key_block_size = 8 row_format = dynamic;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
create table t5 (id int primary key) engine = innodb
@@ -230,19 +238,25 @@ FROM information_schema.tables WHERE engine='innodb';
drop table t1, t5;
#test multiple errors
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t1 (id int primary key) engine = innodb
key_block_size = 9 row_format = redundant;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t2 (id int primary key) engine = innodb
key_block_size = 9 row_format = compact;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t2 (id int primary key) engine = innodb
key_block_size = 9 row_format = dynamic;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
SELECT table_schema, table_name, row_format, data_length, index_length
@@ -251,26 +265,40 @@ FROM information_schema.tables WHERE engine='innodb';
#test valid values with innodb_file_per_table unset
set global innodb_file_per_table = off;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t1 (id int primary key) engine = innodb key_block_size = 1;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t2 (id int primary key) engine = innodb key_block_size = 2;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t3 (id int primary key) engine = innodb key_block_size = 4;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t4 (id int primary key) engine = innodb key_block_size = 8;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t5 (id int primary key) engine = innodb key_block_size = 16;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t6 (id int primary key) engine = innodb row_format = compressed;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t7 (id int primary key) engine = innodb row_format = dynamic;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
create table t8 (id int primary key) engine = innodb row_format = compact;
create table t9 (id int primary key) engine = innodb row_format = redundant;
@@ -283,26 +311,40 @@ drop table t8, t9;
set global innodb_file_per_table = on;
set global innodb_file_format = `0`;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t1 (id int primary key) engine = innodb key_block_size = 1;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t2 (id int primary key) engine = innodb key_block_size = 2;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t3 (id int primary key) engine = innodb key_block_size = 4;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t4 (id int primary key) engine = innodb key_block_size = 8;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t5 (id int primary key) engine = innodb key_block_size = 16;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t6 (id int primary key) engine = innodb row_format = compressed;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
+--replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t7 (id int primary key) engine = innodb row_format = dynamic;
+--replace_regex / - .*[0-9]*)/)/
show warnings;
create table t8 (id int primary key) engine = innodb row_format = compact;
create table t9 (id int primary key) engine = innodb row_format = redundant;
diff --git a/mysql-test/suite/maria/maria-connect.result b/mysql-test/suite/maria/maria-connect.result
index ed626a003f5..d79ebbdb86c 100644
--- a/mysql-test/suite/maria/maria-connect.result
+++ b/mysql-test/suite/maria/maria-connect.result
@@ -16,6 +16,7 @@ a
4
SHOW BINLOG EVENTS FROM <start_pos>;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Binlog_checkpoint 1 # master-bin.000001
master-bin.000001 # Query 1 # use `test`; CREATE TABLE t1 (a int primary key)
master-bin.000001 # Query 1 # BEGIN
master-bin.000001 # Query 1 # use `test`; insert t1 values (1),(2),(3)
diff --git a/mysql-test/suite/maria/maria-recovery2.result b/mysql-test/suite/maria/maria-recovery2.result
index 149ce5a01af..6e4daa64615 100644
--- a/mysql-test/suite/maria/maria-recovery2.result
+++ b/mysql-test/suite/maria/maria-recovery2.result
@@ -1,4 +1,4 @@
-call mtr.add_suppression("File '.*aria_log.000.*' not found \\(Errcode: 2\\)");
+call mtr.add_suppression("File '.*aria_log.000.*' not found \\(Errcode: 2 .*\\)");
call mtr.add_suppression("Table '.\/mysqltest\/t_corrupted1' is crashed, skipping it. Please repair it with aria_chk -r");
set global aria_log_file_size=4294959104;
drop database if exists mysqltest;
diff --git a/mysql-test/suite/maria/maria-recovery2.test b/mysql-test/suite/maria/maria-recovery2.test
index 8d27d9aaaac..3c0ad75f8a8 100644
--- a/mysql-test/suite/maria/maria-recovery2.test
+++ b/mysql-test/suite/maria/maria-recovery2.test
@@ -5,7 +5,7 @@
--source include/have_debug.inc
--source include/have_maria.inc
-call mtr.add_suppression("File '.*aria_log.000.*' not found \\(Errcode: 2\\)");
+call mtr.add_suppression("File '.*aria_log.000.*' not found \\(Errcode: 2 .*\\)");
call mtr.add_suppression("Table '.\/mysqltest\/t_corrupted1' is crashed, skipping it. Please repair it with aria_chk -r");
set global aria_log_file_size=4294959104;
diff --git a/mysql-test/suite/maria/maria.result b/mysql-test/suite/maria/maria.result
index 888c1c10c5e..6efd87df786 100644
--- a/mysql-test/suite/maria/maria.result
+++ b/mysql-test/suite/maria/maria.result
@@ -1586,7 +1586,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
drop table t1;
create table t1 (v varchar(65535));
-ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 65535. You have to change some columns to TEXT or BLOBs
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 65535. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
set @save_concurrent_insert=@@concurrent_insert;
set global concurrent_insert=1;
create table t1 (a int) ROW_FORMAT=FIXED;
diff --git a/mysql-test/suite/multi_source/info_logs-master.opt b/mysql-test/suite/multi_source/info_logs-master.opt
new file mode 100644
index 00000000000..da4e6d3c455
--- /dev/null
+++ b/mysql-test/suite/multi_source/info_logs-master.opt
@@ -0,0 +1 @@
+--relay-log=relay.bin --relay-log-info=relay.bin.info
diff --git a/mysql-test/suite/multi_source/info_logs.result b/mysql-test/suite/multi_source/info_logs.result
new file mode 100644
index 00000000000..b72e934ba57
--- /dev/null
+++ b/mysql-test/suite/multi_source/info_logs.result
@@ -0,0 +1,115 @@
+#
+# List of files matching '*info*' pattern before starting any slaves
+multi-master.info
+# End of list
+#
+# Contents of multi-master.info
+# EOF
+#
+change master 'master1' to
+master_port=MYPORT_1,
+master_host='127.0.0.1',
+master_user='root';
+start slave 'master1';
+set default_master_connection = 'master1';
+include/wait_for_slave_to_start.inc
+#
+# List of files matching '*info*' pattern while 'master1' is running
+master-master1.info
+multi-master.info
+relay.bin-master1.info
+# End of list
+#
+# Contents of multi-master.info
+master1
+# EOF
+#
+change master 'MASTER 2.2' to
+master_port=MYPORT_2,
+master_host='127.0.0.1',
+master_user='root';
+start slave 'MASTER 2.2';
+set default_master_connection = 'MASTER 2.2';
+include/wait_for_slave_to_start.inc
+#
+# List of files matching '*info*' pattern
+# while 'master1' and 'MASTER 2.2' are running
+master-master1.info
+master-master@00202@002e2.info
+multi-master.info
+relay.bin-master1.info
+relay.bin-master@00202@002e2.info
+# End of list
+#
+# Contents of multi-master.info
+master1
+MASTER 2.2
+# EOF
+#
+stop slave 'master1';
+set default_master_connection = 'master1';
+include/wait_for_slave_to_stop.inc
+reset slave 'master1' all;
+#
+# List of files matching '*info*' pattern
+# after 'master1' was completely reset, 'MASTER 2.2' still running
+master-master@00202@002e2.info
+multi-master.info
+relay.bin-master@00202@002e2.info
+# End of list
+#
+# Contents of multi-master.info
+MASTER 2.2
+# EOF
+#
+set default_master_connection = '';
+change master to
+master_port=MYPORT_1,
+master_host='127.0.0.1',
+master_user='root';
+start slave;
+include/wait_for_slave_to_start.inc
+#
+# List of files matching '*info*' pattern
+# while 'MASTER 2.2' and '' are running
+master-master@00202@002e2.info
+master.info
+multi-master.info
+relay.bin-master@00202@002e2.info
+relay.bin.info
+# End of list
+#
+# Contents of multi-master.info
+MASTER 2.2
+# EOF
+#
+show all slaves status;
+Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period
+ Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 286 relay.000002 572 master-bin.000001 Yes Yes 0 0 286 857 None 0 No 0 No 0 0 1 0 1073741824 6 0 60.000
+MASTER 2.2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 286 relay-master@00202@002e2.000002 572 master-bin.000001 Yes Yes 0 0 286 876 None 0 No 0 No 0 0 2 0 1073741824 6 0 60.000
+include/wait_for_slave_to_start.inc
+set default_master_connection = 'MASTER 2.2';
+include/wait_for_slave_to_start.inc
+set default_master_connection = '';
+show all slaves status;
+Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period
+ Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 286 relay.000004 532 master-bin.000001 Yes Yes 0 0 286 817 None 0 No 0 No 0 0 1 0 1073741824 6 0 60.000
+MASTER 2.2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 286 relay-master@00202@002e2.000004 532 master-bin.000001 Yes Yes 0 0 286 836 None 0 No 0 No 0 0 2 0 1073741824 6 0 60.000
+#
+# List of files matching '*info*' pattern
+# after slave server restart
+# while 'MASTER 2.2' and '' are running
+master-master@00202@002e2.info
+master.info
+multi-master.info
+relay.bin-master@00202@002e2.info
+relay.bin.info
+# End of list
+#
+# Contents of multi-master.info
+MASTER 2.2
+# EOF
+#
+include/reset_master_slave.inc
+include/reset_master_slave.inc
+include/reset_master_slave.inc
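
A note on the file names in this result: the connection name is embedded into each log and info file name, and characters that are awkward in file names appear to be escaped as @ followed by the character code, so the space (0x20) in 'MASTER 2.2' becomes @0020 and the dot (0x2e) becomes @002e, yielding master-master@00202@002e2.info. A minimal sketch of the correspondence, inferred from the listings above:

    change master 'MASTER 2.2' to master_host='127.0.0.1', master_user='root';
    -- on disk (per the listings above):
    --   master-master@00202@002e2.info
    --   relay.bin-master@00202@002e2.info
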
diff --git a/mysql-test/suite/multi_source/info_logs.test b/mysql-test/suite/multi_source/info_logs.test
new file mode 100644
index 00000000000..73e6ee73722
--- /dev/null
+++ b/mysql-test/suite/multi_source/info_logs.test
@@ -0,0 +1,178 @@
+#
+# Check log files with multi-source
+#
+
+--source include/not_embedded.inc
+
+--connect (slave,127.0.0.1,root,,,$SERVER_MYPORT_3)
+
+--let $datadir = `SELECT @@datadir`
+
+#
+# Check creation and updating of multi-source *info* logs
+#
+
+--echo #
+--echo # List of files matching '*info*' pattern before starting any slaves
+--list_files $datadir *info*
+--echo # End of list
+--echo #
+--echo # Contents of multi-master.info
+--cat_file $datadir/multi-master.info
+--echo # EOF
+--echo #
+
+# Start replication from the first master
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1
+eval change master 'master1' to
+master_port=$SERVER_MYPORT_1,
+master_host='127.0.0.1',
+master_user='root';
+
+start slave 'master1';
+set default_master_connection = 'master1';
+--source include/wait_for_slave_to_start.inc
+
+# Check the files
+
+--echo #
+--echo # List of files matching '*info*' pattern while 'master1' is running
+--list_files $datadir *info*
+--echo # End of list
+--echo #
+--echo # Contents of multi-master.info
+--cat_file $datadir/multi-master.info
+--echo # EOF
+--echo #
+
+# Start replication from the second master
+
+--replace_result $SERVER_MYPORT_2 MYPORT_2
+eval change master 'MASTER 2.2' to
+master_port=$SERVER_MYPORT_2,
+master_host='127.0.0.1',
+master_user='root';
+
+start slave 'MASTER 2.2';
+set default_master_connection = 'MASTER 2.2';
+--source include/wait_for_slave_to_start.inc
+
+--connect (master2,127.0.0.1,root,,,$SERVER_MYPORT_2)
+--save_master_pos
+
+--connection slave
+--sync_with_master 0,'MASTER 2.2'
+
+# Check the files
+
+--echo #
+--echo # List of files matching '*info*' pattern
+--echo # while 'master1' and 'MASTER 2.2' are running
+--list_files $datadir *info*
+--echo # End of list
+--echo #
+--echo # Contents of multi-master.info
+--cat_file $datadir/multi-master.info
+--echo # EOF
+--echo #
+
+# Remove master1 configuration
+
+stop slave 'master1';
+set default_master_connection = 'master1';
+--source include/wait_for_slave_to_stop.inc
+reset slave 'master1' all;
+
+# Check the files
+
+--echo #
+--echo # List of files matching '*info*' pattern
+--echo # after 'master1' was completely reset, 'MASTER 2.2' still running
+--list_files $datadir *info*
+--echo # End of list
+--echo #
+--echo # Contents of multi-master.info
+--cat_file $datadir/multi-master.info
+--echo # EOF
+--echo #
+
+# Start replication from the first master,
+# now with the default empty name
+
+set default_master_connection = '';
+--replace_result $SERVER_MYPORT_1 MYPORT_1
+eval change master to
+master_port=$SERVER_MYPORT_1,
+master_host='127.0.0.1',
+master_user='root';
+
+start slave;
+--source include/wait_for_slave_to_start.inc
+
+--connect (master1,127.0.0.1,root,,,$SERVER_MYPORT_1)
+--save_master_pos
+
+--connection slave
+--sync_with_master
+
+# Check the files
+
+--echo #
+--echo # List of files matching '*info*' pattern
+--echo # while 'MASTER 2.2' and '' are running
+--list_files $datadir *info*
+--echo # End of list
+--echo #
+--echo # Contents of multi-master.info
+--cat_file $datadir/multi-master.info
+--echo # EOF
+--echo #
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1 $SERVER_MYPORT_2 MYPORT_2
+show all slaves status;
+
+# Restart the slave server
+
+--enable_reconnect
+--append_file $MYSQLTEST_VARDIR/tmp/mysqld.3.expect
+restart
+EOF
+--shutdown_server 60
+--source include/wait_until_connected_again.inc
+--source include/wait_for_slave_to_start.inc
+set default_master_connection = 'MASTER 2.2';
+--source include/wait_for_slave_to_start.inc
+set default_master_connection = '';
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1 $SERVER_MYPORT_2 MYPORT_2
+show all slaves status;
+
+# Check the files
+
+--echo #
+--echo # List of files matching '*info*' pattern
+--echo # after slave server restart
+--echo # while 'MASTER 2.2' and '' are running
+--list_files $datadir *info*
+--echo # End of list
+--echo #
+--echo # Contents of multi-master.info
+--cat_file $datadir/multi-master.info
+--echo # EOF
+--echo #
+
+
+# Cleanup
+
+--source reset_master_slave.inc
+--disconnect slave
+
+--connection master1
+--source reset_master_slave.inc
+--disconnect master1
+
+--connection master2
+--source reset_master_slave.inc
+--disconnect master2
+
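
The server restart in this test uses mysqltest's expect-file mechanism: appending the word restart to $MYSQLTEST_VARDIR/tmp/mysqld.3.expect tells mysql-test-run.pl to bring mysqld.3 back up after --shutdown_server, and wait_until_connected_again.inc blocks until it is reachable. The pattern, as used above:

    --enable_reconnect
    --append_file $MYSQLTEST_VARDIR/tmp/mysqld.3.expect
    restart
    EOF
    --shutdown_server 60
    --source include/wait_until_connected_again.inc
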
diff --git a/mysql-test/suite/multi_source/multisource.result b/mysql-test/suite/multi_source/multisource.result
new file mode 100644
index 00000000000..c4fb7123e81
--- /dev/null
+++ b/mysql-test/suite/multi_source/multisource.result
@@ -0,0 +1,140 @@
+change master 'master1' to
+master_port=MYPORT_1,
+master_host='127.0.0.1',
+master_user='root';
+start slave 'master1';
+set default_master_connection = 'master1';
+include/wait_for_slave_to_start.inc
+show slave 'master1' status;
+Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id
+Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 286 mysqld-relay-bin-master1.000002 572 master-bin.000001 Yes Yes 0 0 286 876 None 0 No 0 No 0 0 1
+show slave status;
+Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id
+Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 286 mysqld-relay-bin-master1.000002 572 master-bin.000001 Yes Yes 0 0 286 876 None 0 No 0 No 0 0 1
+show all slaves status;
+Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period
+master1 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 286 mysqld-relay-bin-master1.000002 572 master-bin.000001 Yes Yes 0 0 286 876 None 0 No 0 No 0 0 1 0 1073741824 6 0 60.000
+drop database if exists db1;
+create database db1;
+use db1;
+create table t1 (i int auto_increment, f1 varchar(16), primary key pk (i,f1)) engine=MyISAM;
+insert into t1 (f1) values ('one'),('two');
+select * from db1.t1;
+i f1
+1 one
+2 two
+# List of relay log files in the datadir
+mysqld-relay-bin-master1.000001
+mysqld-relay-bin-master1.000002
+mysqld-relay-bin-master1.index
+show relaylog events;
+Log_name Pos Event_type Server_id End_log_pos Info
+mysqld-relay-bin-master1.000001 4 Format_desc 3 246 Server version
+mysqld-relay-bin-master1.000001 246 Rotate 3 304 mysqld-relay-bin-master1.000002;pos=4
+show relaylog events in 'mysqld-relay-bin-master1.000002';
+Log_name Pos Event_type Server_id End_log_pos Info
+mysqld-relay-bin-master1.000002 4 Format_desc 3 246 Server version
+mysqld-relay-bin-master1.000002 246 Rotate 1 0 master-bin.000001;pos=4
+mysqld-relay-bin-master1.000002 290 Format_desc 1 246 Server version
+mysqld-relay-bin-master1.000002 532 Binlog_checkpoint 1 286 master-bin.000001
+mysqld-relay-bin-master1.000002 572 Query 1 375 drop database if exists db1
+mysqld-relay-bin-master1.000002 661 Query 1 456 create database db1
+mysqld-relay-bin-master1.000002 742 Query 1 609 use `db1`; create table t1 (i int auto_increment, f1 varchar(16), primary key pk (i,f1)) engine=MyISAM
+mysqld-relay-bin-master1.000002 895 Query 1 676 BEGIN
+mysqld-relay-bin-master1.000002 962 Intvar 1 704 INSERT_ID=1
+mysqld-relay-bin-master1.000002 990 Query 1 808 use `db1`; insert into t1 (f1) values ('one'),('two')
+mysqld-relay-bin-master1.000002 1094 Query 1 876 COMMIT
+change master 'master1' to
+master_port=MYPORT_2,
+master_host='127.0.0.1',
+master_user='root';
+ERROR HY000: This operation cannot be performed as you have a running slave 'master1'; run STOP SLAVE 'master1' first
+change master to
+master_port=MYPORT_2,
+master_host='127.0.0.1',
+master_user='root';
+ERROR HY000: This operation cannot be performed as you have a running slave 'master1'; run STOP SLAVE 'master1' first
+change master 'master2' to
+master_port=MYPORT_1,
+master_host='127.0.0.1',
+master_user='root';
+ERROR HY000: Connection 'master2' conflicts with existing connection 'master1'
+set default_master_connection = '';
+change master to
+master_port=MYPORT_2,
+master_host='127.0.0.1',
+master_user='root';
+start slave;
+include/wait_for_slave_to_start.inc
+show all slaves status;
+Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period
+ Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 286 mysqld-relay-bin.000002 572 master-bin.000001 Yes Yes 0 0 286 868 None 0 No 0 No 0 0 2 0 1073741824 6 0 60.000
+master1 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 876 mysqld-relay-bin-master1.000002 1162 master-bin.000001 Yes Yes 0 0 876 1466 None 0 No 0 No 0 0 1 0 1073741824 13 0 60.000
+insert into t1 (f1) values ('three');
+drop database if exists db2;
+create database db2;
+use db2;
+create table t1 (pk int auto_increment primary key, f1 int) engine=InnoDB;
+begin;
+insert into t1 (f1) values (1),(2);
+select * from db1.t1;
+i f1
+1 one
+2 two
+3 three
+select * from db2.t1;
+pk f1
+commit;
+select * from db2.t1;
+pk f1
+1 1
+2 2
+flush logs;
+purge binary logs to 'master-bin.000002';
+show binary logs;
+Log_name File_size
+master-bin.000002 326
+insert into t1 (f1) values ('four');
+create table db1.t3 (f1 int) engine=InnoDB;
+show all slaves status;
+Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period
+ Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 809 mysqld-relay-bin.000002 1095 master-bin.000001 Yes Yes 0 0 809 1391 None 0 No 0 No 0 0 2 0 1073741824 13 0 60.000
+master1 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000002 690 mysqld-relay-bin-master1.000004 976 master-bin.000002 Yes Yes 0 0 690 1324 None 0 No 0 No 0 0 1 0 1073741824 31 0 60.000
+select * from db1.t1;
+i f1
+1 one
+2 two
+3 three
+4 four
+show relaylog events;
+Log_name Pos Event_type Server_id End_log_pos Info
+mysqld-relay-bin.000001 4 Format_desc 3 246 Server version
+mysqld-relay-bin.000001 246 Rotate 3 296 mysqld-relay-bin.000002;pos=4
+show relaylog events in 'mysqld-relay-bin.000002';
+Log_name Pos Event_type Server_id End_log_pos Info
+mysqld-relay-bin.000002 4 Format_desc 3 246 Server version
+mysqld-relay-bin.000002 246 Rotate 2 0 master-bin.000001;pos=4
+mysqld-relay-bin.000002 290 Format_desc 2 246 Server version
+mysqld-relay-bin.000002 532 Binlog_checkpoint 2 286 master-bin.000001
+mysqld-relay-bin.000002 572 Query 2 375 drop database if exists db2
+mysqld-relay-bin.000002 661 Query 2 456 create database db2
+mysqld-relay-bin.000002 742 Query 2 591 use `db2`; create table t1 (pk int auto_increment primary key, f1 int) engine=InnoDB
+mysqld-relay-bin.000002 877 Query 2 658 BEGIN
+mysqld-relay-bin.000002 944 Intvar 2 686 INSERT_ID=1
+mysqld-relay-bin.000002 972 Query 2 782 use `db2`; insert into t1 (f1) values (1),(2)
+mysqld-relay-bin.000002 1068 Xid 2 809 COMMIT /* xid=<num> */
+stop slave io_thread;
+show status like 'Slave_running';
+Variable_name Value
+Slave_running OFF
+set default_master_connection = 'master1';
+show status like 'Slave_running';
+Variable_name Value
+Slave_running ON
+drop database db1;
+drop database db2;
+include/reset_master_slave.inc
+drop database db1;
+include/reset_master_slave.inc
+drop database db2;
+include/reset_master_slave.inc
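
This result file exercises the three status forms available with multi-source replication: SHOW SLAVE 'name' STATUS and plain SHOW SLAVE STATUS (which applies to the current @@default_master_connection) return the classic single-connection columns, while SHOW ALL SLAVES STATUS prefixes Connection_name and Slave_SQL_State and appends the per-connection counters (Retried_transactions, Max_relay_log_size, Executed_log_entries, Slave_received_heartbeats, Slave_heartbeat_period). A minimal usage sketch:

    set default_master_connection = 'master1';
    show slave status;            -- status of 'master1', the current default
    show slave 'master1' status;  -- the same connection, addressed by name
    show all slaves status;       -- one row per configured connection
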
diff --git a/mysql-test/suite/multi_source/multisource.test b/mysql-test/suite/multi_source/multisource.test
new file mode 100644
index 00000000000..7a9ee166ec2
--- /dev/null
+++ b/mysql-test/suite/multi_source/multisource.test
@@ -0,0 +1,230 @@
+#
+# Test basic replication functionality
+# in a multi-source setup
+#
+
+--source include/not_embedded.inc
+--source include/have_innodb.inc
+
+--connect (slave,127.0.0.1,root,,,$SERVER_MYPORT_3)
+
+# Start replication from the first master
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1
+eval change master 'master1' to
+master_port=$SERVER_MYPORT_1,
+master_host='127.0.0.1',
+master_user='root';
+
+start slave 'master1';
+set default_master_connection = 'master1';
+--source include/wait_for_slave_to_start.inc
+
+--connect (master1,127.0.0.1,root,,,$SERVER_MYPORT_1)
+--save_master_pos
+
+--connection slave
+--sync_with_master 0,'master1'
+
+# Here and below: add an extra check on the SQL thread status,
+# as the normal sync alone is not always enough
+--source wait_for_sql_thread_read_all.inc
+
+# each of the 3 commands should produce
+# 'master1' status
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1
+show slave 'master1' status;
+--replace_result $SERVER_MYPORT_1 MYPORT_1
+show slave status;
+--replace_result $SERVER_MYPORT_1 MYPORT_1
+show all slaves status;
+
+
+# Check that replication actually works
+
+--connection master1
+
+--disable_warnings
+drop database if exists db1;
+--enable_warnings
+create database db1;
+use db1;
+create table t1 (i int auto_increment, f1 varchar(16), primary key pk (i,f1)) engine=MyISAM;
+insert into t1 (f1) values ('one'),('two');
+--save_master_pos
+
+--connection slave
+--sync_with_master 0,'master1'
+
+--sorted_result
+select * from db1.t1;
+
+--let $datadir = `SELECT @@datadir`
+
+--echo # List of relay log files in the datadir
+--list_files $datadir mysqld-relay-bin-master1.*
+
+# Check that relay logs are recognizable
+
+--replace_regex /Server ver:.*/Server version/ /xid=[0-9]+/xid=<num>/
+show relaylog events;
+--replace_regex /Server ver:.*/Server version/ /xid=[0-9]+/xid=<num>/
+show relaylog events in 'mysqld-relay-bin-master1.000002';
+
+
+# Try to configure connection with the same name again,
+# should get an error because the slave is running
+
+--replace_result $SERVER_MYPORT_2 MYPORT_2
+--error ER_SLAVE_MUST_STOP
+eval change master 'master1' to
+master_port=$SERVER_MYPORT_2,
+master_host='127.0.0.1',
+master_user='root';
+
+# Try to configure using the default connection name
+# (which is 'master1' at the moment),
+# again, should get an error
+
+--replace_result $SERVER_MYPORT_2 MYPORT_2
+--error ER_SLAVE_MUST_STOP
+eval change master to
+master_port=$SERVER_MYPORT_2,
+master_host='127.0.0.1',
+master_user='root';
+
+# Try to configure a connection with the same master
+# using a different name, should get a conflict
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1
+--error ER_CONNECTION_ALREADY_EXISTS
+eval change master 'master2' to
+master_port=$SERVER_MYPORT_1,
+master_host='127.0.0.1',
+master_user='root';
+
+
+# Set up a proper 'default' connection to master2
+
+set default_master_connection = '';
+
+--replace_result $SERVER_MYPORT_2 MYPORT_2
+eval change master to
+master_port=$SERVER_MYPORT_2,
+master_host='127.0.0.1',
+master_user='root';
+
+start slave;
+--source include/wait_for_slave_to_start.inc
+
+--source wait_for_sql_thread_read_all.inc
+
+# See both connections in the same status output
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1 $SERVER_MYPORT_2 MYPORT_2
+show all slaves status;
+
+# Check that replication from two servers actually works
+
+--connection master1
+
+insert into t1 (f1) values ('three');
+--save_master_pos
+
+--connect (master2,127.0.0.1,root,,,$SERVER_MYPORT_2)
+
+--disable_warnings
+drop database if exists db2;
+--enable_warnings
+create database db2;
+use db2;
+create table t1 (pk int auto_increment primary key, f1 int) engine=InnoDB;
+begin;
+insert into t1 (f1) values (1),(2);
+
+--connection slave
+--sync_with_master 0,'master1'
+
+--connection master2
+--save_master_pos
+
+--connection slave
+--sync_with_master 0
+--sorted_result
+select * from db1.t1;
+select * from db2.t1;
+
+--connection master2
+commit;
+--save_master_pos
+
+--connection slave
+--sync_with_master 0
+--sorted_result
+select * from db2.t1;
+
+# Flush and purge logs on one master,
+# make sure slaves don't get confused
+
+--connection master1
+flush logs;
+--source include/wait_for_binlog_checkpoint.inc
+--save_master_pos
+--connection slave
+--sync_with_master 0, 'master1'
+
+--connection master1
+purge binary logs to 'master-bin.000002';
+show binary logs;
+insert into t1 (f1) values ('four');
+create table db1.t3 (f1 int) engine=InnoDB;
+--save_master_pos
+
+--connection slave
+--sync_with_master 0,'master1'
+
+--source wait_for_sql_thread_read_all.inc
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1 $SERVER_MYPORT_2 MYPORT_2
+show all slaves status;
+
+--sorted_result
+select * from db1.t1;
+
+# This should show relay log events for the default master
+# (the one with the empty name)
+--replace_regex /Server ver:.*/Server version/ /xid=[0-9]+/xid=<num>/
+show relaylog events;
+--replace_regex /Server ver:.*/Server version/ /xid=[0-9]+/xid=<num>/
+show relaylog events in 'mysqld-relay-bin.000002';
+
+# Make sure we don't lose control over replication connections
+# after reconnecting to the slave
+
+--disconnect slave
+--connect (slave,127.0.0.1,root,,,$SERVER_MYPORT_3)
+
+stop slave io_thread;
+show status like 'Slave_running';
+set default_master_connection = 'master1';
+show status like 'Slave_running';
+
+# Cleanup
+
+drop database db1;
+drop database db2;
+
+--source reset_master_slave.inc
+--disconnect slave
+
+--connection master1
+drop database db1;
+--source reset_master_slave.inc
+--disconnect master1
+
+--connection master2
+drop database db2;
+--source reset_master_slave.inc
+--disconnect master2
+
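
The test above also pins down the error handling: CHANGE MASTER against a running connection fails with ER_SLAVE_MUST_STOP, and configuring a second connection name for the same master host and port fails with ER_CONNECTION_ALREADY_EXISTS. The legal sequence is therefore stop, reconfigure, restart (a sketch; the port value is a placeholder):

    stop slave 'master1';
    change master 'master1' to
      master_port=3307, master_host='127.0.0.1', master_user='root';
    start slave 'master1';
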
diff --git a/mysql-test/suite/multi_source/my.cnf b/mysql-test/suite/multi_source/my.cnf
new file mode 100644
index 00000000000..826967b52f9
--- /dev/null
+++ b/mysql-test/suite/multi_source/my.cnf
@@ -0,0 +1,24 @@
+!include include/default_mysqld.cnf
+!include include/default_client.cnf
+
+[mysqld.1]
+server-id=1
+log-bin=master-bin
+log-warnings=2
+
+[mysqld.2]
+server-id=2
+log-bin=master-bin
+log-warnings=2
+
+[mysqld.3]
+server-id=3
+log-warnings=2
+
+[ENV]
+SERVER_MYPORT_1= @mysqld.1.port
+SERVER_MYSOCK_1= @mysqld.1.socket
+SERVER_MYPORT_2= @mysqld.2.port
+SERVER_MYSOCK_2= @mysqld.2.socket
+SERVER_MYPORT_3= @mysqld.3.port
+SERVER_MYSOCK_3= @mysqld.3.socket
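
This suite-level my.cnf defines the topology shared by all multi_source tests: mysqld.1 and mysqld.2 are binlogging masters (log-bin=master-bin), mysqld.3 is the multi-source slave with no binlog of its own, and the [ENV] section exports the ports the tests reference as $SERVER_MYPORT_N. The tests connect to the three servers like this:

    --connect (slave,127.0.0.1,root,,,$SERVER_MYPORT_3)
    --connect (master1,127.0.0.1,root,,,$SERVER_MYPORT_1)
    --connect (master2,127.0.0.1,root,,,$SERVER_MYPORT_2)
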
diff --git a/mysql-test/suite/multi_source/relaylog_events.result b/mysql-test/suite/multi_source/relaylog_events.result
new file mode 100644
index 00000000000..1f3b57b045d
--- /dev/null
+++ b/mysql-test/suite/multi_source/relaylog_events.result
@@ -0,0 +1,27 @@
+change master 'master1' to
+master_port=MYPORT_1,
+master_host='127.0.0.1',
+master_user='root';
+start slave 'master1';
+set default_master_connection = 'master1';
+include/wait_for_slave_to_start.inc
+drop table if exists t1;
+create table t1 (i int) engine=MyISAM;
+mysqld-relay-bin-master1.000001
+mysqld-relay-bin-master1.000002
+mysqld-relay-bin-master1.index
+show relaylog events in 'mysqld-relay-bin-master1.000002';
+Log_name Pos Event_type Server_id End_log_pos Info
+mysqld-relay-bin-master1.000002 4 Format_desc 3 246 Server version
+mysqld-relay-bin-master1.000002 246 Rotate 1 0 master-bin.000001;pos=4
+mysqld-relay-bin-master1.000002 290 Format_desc 1 246 Server version
+mysqld-relay-bin-master1.000002 532 Binlog_checkpoint 1 286 master-bin.000001
+mysqld-relay-bin-master1.000002 572 Query 1 400 use `test`; DROP TABLE IF EXISTS `t1` /* generated by server */
+mysqld-relay-bin-master1.000002 686 Query 1 500 use `test`; create table t1 (i int) engine=MyISAM
+show relaylog events;
+Log_name Pos Event_type Server_id End_log_pos Info
+mysqld-relay-bin-master1.000001 4 Format_desc 3 246 Server version
+mysqld-relay-bin-master1.000001 246 Rotate 3 304 mysqld-relay-bin-master1.000002;pos=4
+drop table t1;
+include/reset_master_slave.inc
+include/reset_master_slave.inc
diff --git a/mysql-test/suite/multi_source/relaylog_events.test b/mysql-test/suite/multi_source/relaylog_events.test
new file mode 100644
index 00000000000..ba13a4dd7e9
--- /dev/null
+++ b/mysql-test/suite/multi_source/relaylog_events.test
@@ -0,0 +1,51 @@
+#
+# Check that SHOW RELAYLOG EVENTS can be used
+# for a named master connection
+#
+
+--source include/not_embedded.inc
+
+--connect (slave,127.0.0.1,root,,,$SERVER_MYPORT_3)
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1
+eval change master 'master1' to
+master_port=$SERVER_MYPORT_1,
+master_host='127.0.0.1',
+master_user='root';
+
+start slave 'master1';
+set default_master_connection = 'master1';
+--source include/wait_for_slave_to_start.inc
+
+--connect (master1,127.0.0.1,root,,,$SERVER_MYPORT_1)
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+create table t1 (i int) engine=MyISAM;
+--save_master_pos
+
+--connection slave
+--sync_with_master 0,'master1'
+
+--let $datadir = `SELECT @@datadir`
+--list_files $datadir mysqld-relay-bin-master1.*
+
+--replace_regex /Server ver:.*/Server version/
+show relaylog events in 'mysqld-relay-bin-master1.000002';
+--replace_regex /Server ver:.*/Server version/
+show relaylog events;
+
+--connection master1
+drop table t1;
+--save_master_pos
+
+--connection slave
+--sync_with_master 0,'master1'
+
+--source reset_master_slave.inc
+--disconnect slave
+
+--connection master1
+--source reset_master_slave.inc
+--disconnect master1
+
diff --git a/mysql-test/suite/multi_source/reset_master_slave.inc b/mysql-test/suite/multi_source/reset_master_slave.inc
new file mode 100644
index 00000000000..63ba3ee00af
--- /dev/null
+++ b/mysql-test/suite/multi_source/reset_master_slave.inc
@@ -0,0 +1,29 @@
+#
+# This include file runs RESET SLAVE ALL for every replication connection
+# currently present in SHOW ALL SLAVES STATUS output on the server,
+# and also runs RESET MASTER on the same server.
+#
+
+--let $include_filename= reset_master_slave.inc
+--source include/begin_include_file.inc
+
+--disable_query_log
+--disable_result_log
+--disable_warnings
+--let $default_master = `SELECT @@default_master_connection`
+--let $con_name = query_get_value(show all slaves status, Connection_name, 1)
+while ($con_name != 'No such row')
+{
+ eval set default_master_connection = '$con_name';
+ stop slave;
+ --source include/wait_for_slave_to_stop.inc
+ reset slave all;
+ --let $con_name = query_get_value(show all slaves status, Connection_name, 1)
+}
+
+--error 0,ER_FLUSH_MASTER_BINLOG_CLOSED
+reset master;
+eval set default_master_connection = '$default_master';
+
+--source include/end_include_file.inc
+
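
The loop above relies on mysqltest's query_get_value(), which returns the literal string 'No such row' once row 1 of SHOW ALL SLAVES STATUS no longer exists; because each iteration stops and removes the first remaining connection with RESET SLAVE ALL, re-reading row 1 drains the list. The bare idiom (without the wait include) looks like:

    --let $con_name = query_get_value(show all slaves status, Connection_name, 1)
    while ($con_name != 'No such row')
    {
      eval set default_master_connection = '$con_name';
      stop slave;
      reset slave all;
      --let $con_name = query_get_value(show all slaves status, Connection_name, 1)
    }
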
diff --git a/mysql-test/suite/multi_source/reset_slave.result b/mysql-test/suite/multi_source/reset_slave.result
new file mode 100644
index 00000000000..0c2777f7958
--- /dev/null
+++ b/mysql-test/suite/multi_source/reset_slave.result
@@ -0,0 +1,28 @@
+change master 'master1' to
+master_port=MYPORT_1,
+master_host='127.0.0.1',
+master_user='root';
+start slave 'master1';
+set default_master_connection = 'master1';
+include/wait_for_slave_to_start.inc
+drop table if exists t1;
+create table t1 (i int) engine=MyISAM;
+insert into t1 values (1),(2);
+stop slave 'master1';
+show slave 'master1' status;
+Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id
+ 127.0.0.1 root MYPORT_1 60 master-bin.000001 729 mysqld-relay-bin-master1.000002 1015 master-bin.000001 No No 0 0 729 1319 None 0 No NULL No 0 0 1
+mysqld-relay-bin-master1.000001
+mysqld-relay-bin-master1.000002
+mysqld-relay-bin-master1.index
+reset slave 'master1';
+show slave 'master1' status;
+Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id
+ 127.0.0.1 root MYPORT_1 60 4 1015 No No 0 0 0 1319 None 0 No NULL No 0 0 1
+reset slave 'master1' all;
+show slave 'master1' status;
+ERROR HY000: There is no master connection 'master1'
+drop table t1;
+include/reset_master_slave.inc
+drop table t1;
+include/reset_master_slave.inc
diff --git a/mysql-test/suite/multi_source/reset_slave.test b/mysql-test/suite/multi_source/reset_slave.test
new file mode 100644
index 00000000000..bcc0560fe5b
--- /dev/null
+++ b/mysql-test/suite/multi_source/reset_slave.test
@@ -0,0 +1,68 @@
+#
+# Check RESET SLAVE [name] [ALL] for multi-source replication
+#
+
+--source include/not_embedded.inc
+
+--connect (slave,127.0.0.1,root,,,$SERVER_MYPORT_3)
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1
+eval change master 'master1' to
+master_port=$SERVER_MYPORT_1,
+master_host='127.0.0.1',
+master_user='root';
+
+start slave 'master1';
+
+set default_master_connection = 'master1';
+--source include/wait_for_slave_to_start.inc
+
+--connect (master1,127.0.0.1,root,,,$SERVER_MYPORT_1)
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+create table t1 (i int) engine=MyISAM;
+insert into t1 values (1),(2);
+
+--save_master_pos
+
+--connection slave
+
+--sync_with_master 0,'master1'
+stop slave 'master1';
+
+--wait_for_slave_to_stop
+
+--let $datadir = `SELECT @@datadir`
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1
+show slave 'master1' status;
+--list_files $datadir mysqld*
+
+reset slave 'master1';
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1
+show slave 'master1' status;
+--list_files $datadir mysqld*
+
+reset slave 'master1' all;
+
+--error WARN_NO_MASTER_INFO
+show slave 'master1' status;
+--list_files $datadir mysqld*
+
+# Cleanup
+
+drop table t1;
+--source reset_master_slave.inc
+--disconnect slave
+
+--connection master1
+drop table t1;
+--source reset_master_slave.inc
+--disconnect master1
+
+
+
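
The result file above captures the difference between the two RESET forms: RESET SLAVE 'name' discards the relay logs and replication coordinates but keeps the connection configured (the status row survives with emptied log columns), whereas RESET SLAVE 'name' ALL removes the connection entirely, so a subsequent SHOW SLAVE 'name' STATUS fails with "There is no master connection". In short:

    stop slave 'master1';
    reset slave 'master1';        -- coordinates cleared, connection kept
    reset slave 'master1' all;    -- connection removed completely
    show slave 'master1' status;  -- now an error: no master connection 'master1'
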
diff --git a/mysql-test/suite/multi_source/simple.result b/mysql-test/suite/multi_source/simple.result
new file mode 100644
index 00000000000..4018573d074
--- /dev/null
+++ b/mysql-test/suite/multi_source/simple.result
@@ -0,0 +1,77 @@
+change master 'slave1' to master_port=MYPORT_1, master_host='127.0.0.1', master_user='root';
+change master 'slave2' to master_port=MYPORT_2, master_host='127.0.0.1', master_user='root';
+start slave 'slave1';
+set default_master_connection = 'slave1';
+include/wait_for_slave_to_start.inc
+set default_master_connection = 'slave2';
+start all slaves;
+Warnings:
+Note 1937 SLAVE 'slave2' started
+include/wait_for_slave_to_start.inc
+set default_master_connection = '';
+show all slaves status;
+Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period
+slave1 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 286 mysqld-relay-bin-slave1.000002 572 master-bin.000001 Yes Yes 0 0 286 875 None 0 No 0 No 0 0 1 0 1073741824 6 0 60.000
+slave2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 286 mysqld-relay-bin-slave2.000002 572 master-bin.000001 Yes Yes 0 0 286 875 None 0 No 0 No 0 0 2 0 1073741824 6 0 60.000
+start all slaves;
+stop slave 'slave1';
+show slave 'slave1' status;
+Slave_IO_State
+Master_Host 127.0.0.1
+Master_User root
+Master_Port MYPORT_1
+Connect_Retry 60
+Master_Log_File master-bin.000001
+Read_Master_Log_Pos 286
+Relay_Log_File mysqld-relay-bin-slave1.000002
+Relay_Log_Pos 572
+Relay_Master_Log_File master-bin.000001
+Slave_IO_Running No
+Slave_SQL_Running No
+Replicate_Do_DB
+Replicate_Ignore_DB
+Replicate_Do_Table
+Replicate_Ignore_Table
+Replicate_Wild_Do_Table
+Replicate_Wild_Ignore_Table
+Last_Errno 0
+Last_Error
+Skip_Counter 0
+Exec_Master_Log_Pos 286
+Relay_Log_Space 875
+Until_Condition None
+Until_Log_File
+Until_Log_Pos 0
+Master_SSL_Allowed No
+Master_SSL_CA_File
+Master_SSL_CA_Path
+Master_SSL_Cert
+Master_SSL_Cipher
+Master_SSL_Key
+Seconds_Behind_Master NULL
+Master_SSL_Verify_Server_Cert No
+Last_IO_Errno 0
+Last_IO_Error
+Last_SQL_Errno 0
+Last_SQL_Error
+Replicate_Ignore_Server_Ids
+Master_Server_Id 1
+reset slave 'slave1';
+show all slaves status;
+Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period
+slave1 127.0.0.1 root MYPORT_1 60 4 572 No No 0 0 0 875 None 0 No NULL No 0 0 1 0 1073741824 6 0 60.000
+slave2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 286 mysqld-relay-bin-slave2.000002 572 master-bin.000001 Yes Yes 0 0 286 875 None 0 No 0 No 0 0 2 0 1073741824 6 0 60.000
+reset slave 'slave1' all;
+show all slaves status;
+Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period
+slave2 Slave has read all relay log; waiting for the slave I/O thread to update it Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 286 mysqld-relay-bin-slave2.000002 572 master-bin.000001 Yes Yes 0 0 286 875 None 0 No 0 No 0 0 2 0 1073741824 6 0 60.000
+stop all slaves;
+Warnings:
+Note 1938 SLAVE 'slave2' stopped
+show all slaves status;
+Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period
+slave2 127.0.0.1 root MYPORT_2 60 master-bin.000001 286 mysqld-relay-bin-slave2.000002 572 master-bin.000001 No No 0 0 286 875 None 0 No NULL No 0 0 2 0 1073741824 6 0 60.000
+stop all slaves;
+include/reset_master_slave.inc
+include/reset_master_slave.inc
+include/reset_master_slave.inc
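
As simple.result shows, START ALL SLAVES and STOP ALL SLAVES act on every configured connection and emit one Note per connection whose state they actually changed (Note 1937 "SLAVE 'slave2' started", Note 1938 "SLAVE 'slave2' stopped"); when every connection is already in the requested state the statement is a no-op and produces no notes at all:

    start all slaves;   -- Note 1937 for each connection actually started
    start all slaves;   -- all already running: no notes
    stop all slaves;    -- Note 1938 for each connection actually stopped
    stop all slaves;    -- all already stopped: no notes
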
diff --git a/mysql-test/suite/multi_source/simple.test b/mysql-test/suite/multi_source/simple.test
new file mode 100644
index 00000000000..915cbd73941
--- /dev/null
+++ b/mysql-test/suite/multi_source/simple.test
@@ -0,0 +1,73 @@
+#
+# Simple multi-master test
+#
+
+--source include/not_embedded.inc
+
+--connect (slave,127.0.0.1,root,,,$SERVER_MYPORT_3)
+--connect (master1,127.0.0.1,root,,,$SERVER_MYPORT_1)
+--connect (master2,127.0.0.1,root,,,$SERVER_MYPORT_2)
+--connection slave
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1
+eval change master 'slave1' to master_port=$SERVER_MYPORT_1, master_host='127.0.0.1', master_user='root';
+--replace_result $SERVER_MYPORT_2 MYPORT_2
+eval change master 'slave2' to master_port=$SERVER_MYPORT_2, master_host='127.0.0.1', master_user='root';
+start slave 'slave1';
+set default_master_connection = 'slave1';
+--source include/wait_for_slave_to_start.inc
+set default_master_connection = 'slave2';
+start all slaves;
+
+--source include/wait_for_slave_to_start.inc
+set default_master_connection = '';
+
+# Ensure that all data is in the relay log
+--connection master1
+--save_master_pos
+--connection slave
+--sync_with_master 0,'slave1'
+--connection master2
+--save_master_pos
+--connection slave
+--sync_with_master 0,'slave2'
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1 $SERVER_MYPORT_2 MYPORT_2
+show all slaves status;
+
+# Ensure that start all slaves doesn't do anything as all slaves are started
+start all slaves;
+
+stop slave 'slave1';
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1 $SERVER_MYPORT_2 MYPORT_2
+query_vertical show slave 'slave1' status;
+
+reset slave 'slave1';
+--replace_result $SERVER_MYPORT_1 MYPORT_1 $SERVER_MYPORT_2 MYPORT_2
+show all slaves status;
+
+reset slave 'slave1' all;
+--replace_result $SERVER_MYPORT_1 MYPORT_1 $SERVER_MYPORT_2 MYPORT_2
+show all slaves status;
+
+stop all slaves;
+--replace_result $SERVER_MYPORT_1 MYPORT_1 $SERVER_MYPORT_2 MYPORT_2
+show all slaves status;
+
+# Ensure that start all slaves doesn't do anything as all slaves are stopped
+stop all slaves;
+
+#
+# clean up
+#
+
+--source reset_master_slave.inc
+--disconnect slave
+--connection master1
+--source reset_master_slave.inc
+--disconnect master1
+--connection master2
+--source reset_master_slave.inc
+--disconnect master2
+
diff --git a/mysql-test/suite/multi_source/skip_counter.result b/mysql-test/suite/multi_source/skip_counter.result
new file mode 100644
index 00000000000..42ad961d6dd
--- /dev/null
+++ b/mysql-test/suite/multi_source/skip_counter.result
@@ -0,0 +1,113 @@
+drop database if exists db;
+create database db;
+create table db.t1 (i int) engine=MyISAM;
+drop database if exists db;
+create database db;
+create table db.t2 (i int) engine=MyISAM;
+change master 'master1' to
+master_port=MYPORT_1,
+master_host='127.0.0.1',
+master_user='root';
+start slave 'master1';
+set default_master_connection = 'master1';
+include/wait_for_slave_to_start.inc
+set default_master_connection = 'master2';
+change master 'master2' to
+master_port=MYPORT_2,
+master_host='127.0.0.1',
+master_user='root';
+set global sql_slave_skip_counter = 2;
+select @@global.sql_slave_skip_counter;
+@@global.sql_slave_skip_counter
+2
+select @@session.sql_slave_skip_counter;
+@@session.sql_slave_skip_counter
+2
+set session sql_slave_skip_counter = 3;
+select @@global.sql_slave_skip_counter;
+@@global.sql_slave_skip_counter
+3
+select @@session.sql_slave_skip_counter;
+@@session.sql_slave_skip_counter
+3
+set global sql_slave_skip_counter= default;
+select @@global.sql_slave_skip_counter;
+@@global.sql_slave_skip_counter
+0
+select @@session.sql_slave_skip_counter;
+@@session.sql_slave_skip_counter
+0
+set global sql_slave_skip_counter= 3;
+set default_master_connection = 'master1';
+select @@session.sql_slave_skip_counter;
+@@session.sql_slave_skip_counter
+0
+set default_master_connection = 'qqq';
+select @@session.sql_slave_skip_counter;
+@@session.sql_slave_skip_counter
+0
+Warnings:
+Warning 1617 There is no master connection 'qqq'
+set default_master_connection = 'master2';
+select @@session.sql_slave_skip_counter;
+@@session.sql_slave_skip_counter
+3
+select @@global.max_relay_log_size;
+@@global.max_relay_log_size
+1073741824
+set global max_relay_log_size = 1*1024*1024;
+select @@global.max_relay_log_size;
+@@global.max_relay_log_size
+1048576
+select @@session.max_relay_log_size;
+@@session.max_relay_log_size
+1048576
+set session max_relay_log_size = 3*1024*1024;
+select @@global.max_relay_log_size;
+@@global.max_relay_log_size
+3145728
+select @@session.max_relay_log_size;
+@@session.max_relay_log_size
+3145728
+set global max_relay_log_size= default;
+select @@global.max_relay_log_size;
+@@global.max_relay_log_size
+1073741824
+select @@session.max_relay_log_size;
+@@session.max_relay_log_size
+1073741824
+set global max_relay_log_size= 3*1024*1024;
+set default_master_connection = 'master1';
+select @@session.max_relay_log_size;
+@@session.max_relay_log_size
+1073741824
+set default_master_connection = 'qqq';
+select @@session.max_relay_log_size;
+@@session.max_relay_log_size
+0
+Warnings:
+Warning 1617 There is no master connection 'qqq'
+set default_master_connection = 'master2';
+select @@session.max_relay_log_size;
+@@session.max_relay_log_size
+3145728
+set global max_binlog_size= 4*1024*1024;
+select @@global.max_relay_log_size;
+@@global.max_relay_log_size
+3145728
+start slave 'master2';
+include/wait_for_slave_to_start.inc
+set default_master_connection = '';
+show tables in db;
+Tables_in_db
+t1
+t2
+drop database db;
+set global sql_slave_skip_counter = 0;
+set global max_relay_log_size = 1073741824;
+set global max_binlog_size = 1073741824;
+include/reset_master_slave.inc
+drop database db;
+include/reset_master_slave.inc
+drop database db;
+include/reset_master_slave.inc
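
This result demonstrates that sql_slave_skip_counter and max_relay_log_size are kept per replication connection: the @@session value tracks whichever connection @@default_master_connection names (switching between 'master1' and 'master2' changes the value read, and naming an unknown connection such as 'qqq' only raises warning 1617), while a later SET GLOBAL does not disturb values existing connections already hold (after set global max_relay_log_size = 3*1024*1024, 'master1' still reads the original 1073741824). Sketch:

    set global max_relay_log_size = 3*1024*1024;
    set default_master_connection = 'master1';
    select @@session.max_relay_log_size;   -- value held by 'master1'
    set default_master_connection = 'master2';
    select @@session.max_relay_log_size;   -- value held by 'master2'
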
diff --git a/mysql-test/suite/multi_source/skip_counter.test b/mysql-test/suite/multi_source/skip_counter.test
new file mode 100644
index 00000000000..d158f382a97
--- /dev/null
+++ b/mysql-test/suite/multi_source/skip_counter.test
@@ -0,0 +1,143 @@
+#
+# Test of sql_slave_skip_counter and max_relay_log_size
+#
+
+# Create a schema and a table
+# on the 1st master
+
+--source include/not_embedded.inc
+
+--connect (master1,127.0.0.1,root,,,$SERVER_MYPORT_1)
+
+--disable_warnings
+drop database if exists db;
+--enable_warnings
+create database db;
+create table db.t1 (i int) engine=MyISAM;
+--save_master_pos
+
+# Create the same schema and another table
+# on the 2nd master
+
+--connect (master2,127.0.0.1,root,,,$SERVER_MYPORT_2)
+
+--disable_warnings
+drop database if exists db;
+--enable_warnings
+create database db;
+create table db.t2 (i int) engine=MyISAM;
+
+--connect (slave,127.0.0.1,root,,,$SERVER_MYPORT_3)
+
+# Start replication from the first master
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1
+eval change master 'master1' to
+master_port=$SERVER_MYPORT_1,
+master_host='127.0.0.1',
+master_user='root';
+
+start slave 'master1';
+set default_master_connection = 'master1';
+--source include/wait_for_slave_to_start.inc
+--sync_with_master 0,'master1'
+
+# Start replication from the second master
+
+set default_master_connection = 'master2';
+
+--replace_result $SERVER_MYPORT_2 MYPORT_2
+eval change master 'master2' to
+master_port=$SERVER_MYPORT_2,
+master_host='127.0.0.1',
+master_user='root';
+
+# the schema creation will be replicated from the 1st master,
+# so we want to skip it in the second replication connection.
+
+# Normally this would be 2 events, but currently the
+# Binlog_checkpoint event also counts. We may need
+# to modify the test later
+
+--let $skip_counter_saved = `select @@global.sql_slave_skip_counter`
+--let $max_relay_log_size_saved= `select @@global.max_relay_log_size`
+--let $max_binlog_size_saved= `select @@global.max_binlog_size`
+set global sql_slave_skip_counter = 2;
+select @@global.sql_slave_skip_counter;
+select @@session.sql_slave_skip_counter;
+set session sql_slave_skip_counter = 3;
+select @@global.sql_slave_skip_counter;
+select @@session.sql_slave_skip_counter;
+set global sql_slave_skip_counter= default;
+select @@global.sql_slave_skip_counter;
+select @@session.sql_slave_skip_counter;
+set global sql_slave_skip_counter= 3;
+set default_master_connection = 'master1';
+select @@session.sql_slave_skip_counter;
+set default_master_connection = 'qqq';
+select @@session.sql_slave_skip_counter;
+set default_master_connection = 'master2';
+select @@session.sql_slave_skip_counter;
+
+# Test of setting max_relay_log_size
+select @@global.max_relay_log_size;
+set global max_relay_log_size = 1*1024*1024;
+select @@global.max_relay_log_size;
+select @@session.max_relay_log_size;
+set session max_relay_log_size = 3*1024*1024;
+select @@global.max_relay_log_size;
+select @@session.max_relay_log_size;
+set global max_relay_log_size= default;
+select @@global.max_relay_log_size;
+select @@session.max_relay_log_size;
+set global max_relay_log_size= 3*1024*1024;
+set default_master_connection = 'master1';
+select @@session.max_relay_log_size;
+set default_master_connection = 'qqq';
+select @@session.max_relay_log_size;
+set default_master_connection = 'master2';
+select @@session.max_relay_log_size;
+set global max_binlog_size= 4*1024*1024;
+select @@global.max_relay_log_size;
+
+
+start slave 'master2';
+--source include/wait_for_slave_to_start.inc
+set default_master_connection = '';
+
+--connection master2
+--save_master_pos
+
+--connection slave
+
+--sync_with_master 0,'master2'
+
+# If the skip_counter worked as expected, we should
+# get here (replication shouldn't have broken)
+# and should see both tables here
+# (the drop database that came from master2 shouldn't have been executed,
+# so t1 should still exist)
+
+show tables in db;
+
+# Cleanup
+
+drop database db;
+
+--eval set global sql_slave_skip_counter = $skip_counter_saved
+--eval set global max_relay_log_size = $max_relay_log_size_saved
+--eval set global max_binlog_size = $max_binlog_size_saved
+
+--source reset_master_slave.inc
+--disconnect slave
+
+--connection master1
+drop database db;
+--source reset_master_slave.inc
+--disconnect master1
+
+--connection master2
+drop database db;
+--source reset_master_slave.inc
+--disconnect master2
+
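
The test above also shows the standard mysqltest pattern for leaving global state as it was found: capture each global into a $variable with --let and a backtick query before the body, then write it back with --eval during cleanup:

    --let $max_binlog_size_saved= `select @@global.max_binlog_size`
    # ... test body changes the global ...
    --eval set global max_binlog_size = $max_binlog_size_saved
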
diff --git a/mysql-test/suite/multi_source/status_vars.result b/mysql-test/suite/multi_source/status_vars.result
new file mode 100644
index 00000000000..12917f94140
--- /dev/null
+++ b/mysql-test/suite/multi_source/status_vars.result
@@ -0,0 +1,97 @@
+call mtr.add_suppression("Connection 'master1' already exists");
+change master 'master1' to
+master_port=MYPORT_1,
+master_host='127.0.0.1',
+master_user='root',
+master_heartbeat_period = 25;
+start slave 'master1';
+set default_master_connection = 'master1';
+include/wait_for_slave_to_start.inc
+set default_master_connection = '';
+change master to
+master_port=MYPORT_2,
+master_host='127.0.0.1',
+master_user='root',
+master_heartbeat_period=35;
+start slave;
+include/wait_for_slave_to_start.inc
+#
+# Check how status works for the default connection, anonymous or named
+#
+# Slave_running and Slave_heartbeat_period should be local for a connection
+#
+set default_master_connection = '';
+show status like 'Slave_running';
+Variable_name Value
+Slave_running ON
+show status like 'Slave_heartbeat_period';
+Variable_name Value
+Slave_heartbeat_period 35.000
+stop slave io_thread;
+include/wait_for_slave_io_to_stop.inc
+show status like 'Slave_running';
+Variable_name Value
+Slave_running OFF
+set default_master_connection = 'master1';
+show status like 'Slave_running';
+Variable_name Value
+Slave_running ON
+show status like 'Slave_heartbeat_period';
+Variable_name Value
+Slave_heartbeat_period 25.000
+#
+# Slave_received_heartbeats should also be local
+#
+set default_master_connection = '';
+stop slave sql_thread;
+include/wait_for_slave_sql_to_stop.inc
+change master to master_heartbeat_period=1;
+show status like 'Slave_received_heartbeats';
+Variable_name Value
+Slave_received_heartbeats 0
+start slave;
+include/wait_for_slave_to_start.inc
+show status like 'Slave_received_heartbeats';
+Variable_name Value
+Slave_received_heartbeats 2
+stop slave;
+include/wait_for_slave_to_stop.inc
+set default_master_connection = 'master1';
+show status like 'Slave_received_heartbeats';
+Variable_name Value
+Slave_received_heartbeats 0
+stop slave;
+include/wait_for_slave_to_stop.inc
+change master to master_heartbeat_period=2;
+start slave;
+include/wait_for_slave_to_start.inc
+show status like 'Slave_received_heartbeats';
+Variable_name Value
+Slave_received_heartbeats 1
+#
+# Slave_open_temp_tables should be global
+#
+set default_master_connection = '';
+start slave;
+include/wait_for_slave_to_start.inc
+set binlog_format = statement;
+create temporary table tmp1 (i int) engine=MyISAM;
+show status like 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 1
+set default_master_connection = 'master1';
+show status like 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 1
+set binlog_format = statement;
+create temporary table tmp1 (i int) engine=MyISAM;
+show status like 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 2
+set default_master_connection = '';
+show status like 'Slave_open_temp_tables';
+Variable_name Value
+Slave_open_temp_tables 2
+include/reset_master_slave.inc
+include/reset_master_slave.inc
+include/reset_master_slave.inc
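
This result sorts the replication status variables by scope: Slave_running, Slave_heartbeat_period and Slave_received_heartbeats are per connection (their values change when default_master_connection changes), while Slave_open_temp_tables is a single global counter visible identically from every connection. Sketch:

    set default_master_connection = 'master1';
    show status like 'Slave_heartbeat_period';   -- 25.000, from this connection's CHANGE MASTER
    set default_master_connection = '';
    show status like 'Slave_heartbeat_period';   -- 35.000
    show status like 'Slave_open_temp_tables';   -- same count from either connection
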
diff --git a/mysql-test/suite/multi_source/status_vars.test b/mysql-test/suite/multi_source/status_vars.test
new file mode 100644
index 00000000000..eec89ee4e81
--- /dev/null
+++ b/mysql-test/suite/multi_source/status_vars.test
@@ -0,0 +1,138 @@
+#
+# Status variables related to a replication connection
+#
+
+--source include/not_embedded.inc
+
+--connect (slave,127.0.0.1,root,,,$SERVER_MYPORT_3)
+
+call mtr.add_suppression("Connection 'master1' already exists");
+
+# Start replication from the first master
+
+--replace_result $SERVER_MYPORT_1 MYPORT_1
+eval change master 'master1' to
+master_port=$SERVER_MYPORT_1,
+master_host='127.0.0.1',
+master_user='root',
+master_heartbeat_period = 25;
+
+start slave 'master1';
+set default_master_connection = 'master1';
+--source include/wait_for_slave_to_start.inc
+
+
+# Set up a proper 'default' connection to master2
+
+set default_master_connection = '';
+
+--replace_result $SERVER_MYPORT_2 MYPORT_2
+eval change master to
+master_port=$SERVER_MYPORT_2,
+master_host='127.0.0.1',
+master_user='root',
+master_heartbeat_period=35;
+
+start slave;
+--source include/wait_for_slave_to_start.inc
+
+--echo #
+--echo # Check how status works for the default connection, anonymous or named
+--echo #
+
+--echo # Slave_running and Slave_heartbeat_period should be local for a connection
+--echo #
+
+set default_master_connection = '';
+show status like 'Slave_running';
+show status like 'Slave_heartbeat_period';
+stop slave io_thread;
+--source include/wait_for_slave_io_to_stop.inc
+show status like 'Slave_running';
+
+set default_master_connection = 'master1';
+show status like 'Slave_running';
+show status like 'Slave_heartbeat_period';
+
+--echo #
+--echo # Slave_received_heartbeats should also be local
+--echo #
+
+set default_master_connection = '';
+stop slave sql_thread;
+--source include/wait_for_slave_sql_to_stop.inc
+change master to master_heartbeat_period=1;
+show status like 'Slave_received_heartbeats';
+start slave;
+--source include/wait_for_slave_to_start.inc
+
+--let $status_var = Slave_received_heartbeats
+--let $status_var_value = 2
+# The units are tens of seconds
+--let $status_timeout = 30
+--source include/wait_for_status_var.inc
+show status like 'Slave_received_heartbeats';
+stop slave;
+--source include/wait_for_slave_to_stop.inc
+
+set default_master_connection = 'master1';
+show status like 'Slave_received_heartbeats';
+
+stop slave;
+--source include/wait_for_slave_to_stop.inc
+change master to master_heartbeat_period=2;
+start slave;
+--source include/wait_for_slave_to_start.inc
+
+--let $status_var = Slave_received_heartbeats
+--let $status_var_value = 1
+--let $status_timeout = 30
+--source include/wait_for_status_var.inc
+show status like 'Slave_received_heartbeats';
+
+
+--echo #
+--echo # Slave_open_temp_tables should be global
+--echo #
+
+set default_master_connection = '';
+start slave;
+--source include/wait_for_slave_to_start.inc
+
+--connect (master1,127.0.0.1,root,,,$SERVER_MYPORT_1)
+set binlog_format = statement;
+create temporary table tmp1 (i int) engine=MyISAM;
+--save_master_pos
+
+--connection slave
+--sync_with_master 0,'master1'
+show status like 'Slave_open_temp_tables';
+
+set default_master_connection = 'master1';
+show status like 'Slave_open_temp_tables';
+
+--connect (master2,127.0.0.1,root,,,$SERVER_MYPORT_2)
+set binlog_format = statement;
+create temporary table tmp1 (i int) engine=MyISAM;
+--save_master_pos
+
+--connection slave
+--sync_with_master 0,''
+show status like 'Slave_open_temp_tables';
+
+set default_master_connection = '';
+show status like 'Slave_open_temp_tables';
+
+
+# Cleanup
+
+--source reset_master_slave.inc
+--disconnect slave
+--connection master1
+--source reset_master_slave.inc
+--disconnect master1
+--connection master2
+--source reset_master_slave.inc
+--disconnect master2
+
+
diff --git a/mysql-test/suite/multi_source/syntax.result b/mysql-test/suite/multi_source/syntax.result
new file mode 100644
index 00000000000..b82902096c3
--- /dev/null
+++ b/mysql-test/suite/multi_source/syntax.result
@@ -0,0 +1,87 @@
+include/master-slave.inc
+[connection master]
+show slave status;
+Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id
+show slave '' status;
+Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id
+show all slaves status;
+Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period
+#
+# Check error handling
+#
+show slave 'qqq' status;
+ERROR HY000: There is no master connection 'qqq'
+show slave 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc' status;
+ERROR HY000: There is no master connection 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc'
+show slave 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc' status;
+ERROR HY000: Incorrect arguments to MASTER_CONNECTION_NAME
+change master 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc' to master_host='dummy';
+ERROR HY000: Incorrect arguments to MASTER_CONNECTION_NAME
+start slave 'qqq';
+ERROR HY000: There is no master connection 'qqq'
+stop slave 'qqq';
+ERROR HY000: There is no master connection 'qqq'
+slave 'qqq' start;
+ERROR HY000: There is no master connection 'qqq'
+slave 'qqq' stop;
+ERROR HY000: There is no master connection 'qqq'
+flush slave 'qqq';
+ERROR HY000: There is no master connection 'qqq'
+reset slave 'qqq';
+ERROR HY000: There is no master connection 'qqq'
+select master_pos_wait('master-bin.999999',0,2,'qqq');
+master_pos_wait('master-bin.999999',0,2,'qqq')
+NULL
+Warnings:
+Warning 1617 There is no master connection 'qqq'
+select master_pos_wait('master-bin.999999',0,2,'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc');
+master_pos_wait('master-bin.999999',0,2,'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc')
+NULL
+Warnings:
+Warning 1210 Incorrect arguments to MASTER_CONNECTION_NAME
+#
+# Checking usage of default_master_connection
+#
+select @@default_master_connection;
+@@default_master_connection
+
+select @@global.default_master_connection;
+ERROR HY000: Variable 'default_master_connection' is a SESSION variable
+set @@global.default_master_connection='qqq';
+ERROR HY000: Variable 'default_master_connection' is a SESSION variable and can't be used with SET GLOBAL
+set @@default_master_connection='aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc';
+ERROR 42000: Variable 'default_master_connection' can't be set to the value of 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc'
+select @@default_master_connection;
+@@default_master_connection
+
+set @@default_master_connection='qqq';
+select @@default_master_connection;
+@@default_master_connection
+qqq
+show variables like "default_master_connection";
+Variable_name Value
+default_master_connection qqq
+show slave status;
+ERROR HY000: There is no master connection 'qqq'
+select master_pos_wait('master-bin.999999',0,2);
+master_pos_wait('master-bin.999999',0,2)
+NULL
+Warnings:
+Warning 1617 There is no master connection 'qqq'
+set @@default_master_connection='';
+select master_pos_wait('master-bin.999999',0,2);
+master_pos_wait('master-bin.999999',0,2)
+-1
+set @@default_master_connection='';
+#
+# Checking status variables
+#
+show status like "Slave_running";
+Variable_name Value
+Slave_running ON
+set @@default_master_connection='qqq';
+show status like "Slave_running";
+Variable_name Value
+Slave_running
+set @@default_master_connection='';
+include/rpl_end.inc
diff --git a/mysql-test/suite/multi_source/syntax.test b/mysql-test/suite/multi_source/syntax.test
new file mode 100644
index 00000000000..ae493200bf1
--- /dev/null
+++ b/mysql-test/suite/multi_source/syntax.test
@@ -0,0 +1,79 @@
+# Test multi-source replication syntax
+
+--source include/not_embedded.inc
+--source include/master-slave.inc
+
+# Check the syntax of multi-source replication
+
+show slave status;
+show slave '' status;
+show all slaves status;
+
+--echo #
+--echo # Check error handling
+--echo #
+
+--error WARN_NO_MASTER_INFO
+show slave 'qqq' status;
+--error WARN_NO_MASTER_INFO
+show slave 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc' status;
+--error ER_WRONG_ARGUMENTS
+show slave 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc' status;
+--error ER_WRONG_ARGUMENTS
+change master 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc' to master_host='dummy';
+
+--error WARN_NO_MASTER_INFO
+start slave 'qqq';
+--error WARN_NO_MASTER_INFO
+stop slave 'qqq';
+--error WARN_NO_MASTER_INFO
+slave 'qqq' start;
+--error WARN_NO_MASTER_INFO
+slave 'qqq' stop;
+--error WARN_NO_MASTER_INFO
+flush slave 'qqq';
+--error WARN_NO_MASTER_INFO
+reset slave 'qqq';
+
+select master_pos_wait('master-bin.999999',0,2,'qqq');
+select master_pos_wait('master-bin.999999',0,2,'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc');
+
+save_master_pos;
+connection slave;
+sync_with_master 0,'';
+sync_with_master 0 ,'';
+sync_with_master 0, '';
+
+--echo #
+--echo # Checking usage of default_master_connection
+--echo #
+select @@default_master_connection;
+
+--error 1238
+select @@global.default_master_connection;
+--error 1228
+set @@global.default_master_connection='qqq';
+--error 1231
+set @@default_master_connection='aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc';
+select @@default_master_connection;
+set @@default_master_connection='qqq';
+select @@default_master_connection;
+show variables like "default_master_connection";
+
+--error WARN_NO_MASTER_INFO
+show slave status;
+select master_pos_wait('master-bin.999999',0,2);
+set @@default_master_connection='';
+select master_pos_wait('master-bin.999999',0,2);
+
+set @@default_master_connection='';
+
+--echo #
+--echo # Checking status variables
+--echo #
+show status like "Slave_running";
+set @@default_master_connection='qqq';
+show status like "Slave_running";
+set @@default_master_connection='';
+
+--source include/rpl_end.inc
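
For reference, the statement family this test exercises extends the single-source commands with an optional connection name; a sketch under the assumption of placeholder host and port values (only the statement shapes are taken from the test):

change master 'master1' to master_host='127.0.0.1', master_port=3306, master_user='root';
start slave 'master1';
show slave 'master1' status;   # one named connection
show all slaves status;        # every configured connection, with extra columns
stop slave 'master1';
reset slave 'master1';

A name that matches no configured connection raises 'There is no master connection ...', and names beyond the length limit fail with ER_WRONG_ARGUMENTS, as the error-handling block above shows.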
diff --git a/mysql-test/suite/multi_source/wait_for_sql_thread_read_all.inc b/mysql-test/suite/multi_source/wait_for_sql_thread_read_all.inc
new file mode 100644
index 00000000000..ec5ecd0cb17
--- /dev/null
+++ b/mysql-test/suite/multi_source/wait_for_sql_thread_read_all.inc
@@ -0,0 +1,6 @@
+--let $show_statement = show all slaves status
+--let $field = Slave_SQL_State
+--let $condition = = 'Slave has read all relay log; waiting for the slave I/O thread to update it'
+--let $wait_for_all = 1
+--source include/wait_show_condition.inc
+
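This helper drives include/wait_show_condition.inc: the $show_statement is re-issued until $field satisfies $condition, and $wait_for_all = 1 requires the condition to hold for every returned row, i.e. until the SQL threads of all configured connections report having read all of their relay logs. A hypothetical caller (the suite-relative source path is an assumption):

--connection slave
--source wait_for_sql_thread_read_all.inc
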
diff --git a/mysql-test/suite/percona/percona_innodb_fake_changes.result b/mysql-test/suite/percona/percona_innodb_fake_changes.result
index 7f00c687c54..5540c76f2e5 100644
--- a/mysql-test/suite/percona/percona_innodb_fake_changes.result
+++ b/mysql-test/suite/percona/percona_innodb_fake_changes.result
@@ -34,7 +34,7 @@ SELECT * FROM t1;
a
1
COMMIT;
-ERROR HY000: Got error 131 during COMMIT
+ERROR HY000: Got error 131 "Command not supported by database" during COMMIT
SET innodb_fake_changes=default;
DROP TABLE t1;
# DDL must result in error
@@ -43,13 +43,13 @@ SET autocommit=0;
SET innodb_fake_changes=1;
BEGIN;
CREATE TABLE t2 (a INT) ENGINE=InnoDB;
-ERROR HY000: Can't create table 'test.t2' (errno: 131)
+ERROR HY000: Can't create table 'test.t2' (errno: 131 "Command not supported by database")
DROP TABLE t1;
ERROR 42S02: Unknown table 't1'
TRUNCATE TABLE t1;
-ERROR HY000: Got error 131 during COMMIT
+ERROR HY000: Got error 131 "Command not supported by database" during COMMIT
ALTER TABLE t1 ENGINE=MyISAM;
-ERROR HY000: Got error 131 during COMMIT
+ERROR HY000: Got error 131 "Command not supported by database" during COMMIT
ROLLBACK;
SET innodb_fake_changes=default;
DROP TABLE t1;
diff --git a/mysql-test/suite/perfschema/r/all_instances.result b/mysql-test/suite/perfschema/r/all_instances.result
index caf4f49034f..d59f17847e2 100644
--- a/mysql-test/suite/perfschema/r/all_instances.result
+++ b/mysql-test/suite/perfschema/r/all_instances.result
@@ -76,7 +76,9 @@ wait/synch/mutex/sql/Master_info::run_lock
wait/synch/mutex/sql/Master_info::sleep_lock
wait/synch/mutex/sql/MDL_map::mutex
wait/synch/mutex/sql/MDL_wait::LOCK_wait_status
+wait/synch/mutex/sql/MYSQL_BIN_LOG::LOCK_binlog_background_thread
wait/synch/mutex/sql/MYSQL_BIN_LOG::LOCK_index
+wait/synch/mutex/sql/MYSQL_BIN_LOG::LOCK_xid_list
wait/synch/mutex/sql/MYSQL_RELAY_LOG::LOCK_index
wait/synch/mutex/sql/Query_cache::structure_guard_mutex
wait/synch/mutex/sql/Relay_log_info::data_lock
@@ -128,7 +130,10 @@ wait/synch/cond/sql/Master_info::sleep_cond
wait/synch/cond/sql/Master_info::start_cond
wait/synch/cond/sql/Master_info::stop_cond
wait/synch/cond/sql/MDL_context::COND_wait_status
+wait/synch/cond/sql/MYSQL_BIN_LOG::COND_binlog_background_thread
+wait/synch/cond/sql/MYSQL_BIN_LOG::COND_binlog_background_thread_end
wait/synch/cond/sql/MYSQL_BIN_LOG::COND_queue_busy
+wait/synch/cond/sql/MYSQL_BIN_LOG::COND_xid_list
wait/synch/cond/sql/MYSQL_BIN_LOG::update_cond
wait/synch/cond/sql/MYSQL_RELAY_LOG::COND_queue_busy
wait/synch/cond/sql/MYSQL_RELAY_LOG::update_cond
diff --git a/mysql-test/suite/perfschema/r/dml_cond_instances.result b/mysql-test/suite/perfschema/r/dml_cond_instances.result
index 285c32090af..5cd04127700 100644
--- a/mysql-test/suite/perfschema/r/dml_cond_instances.result
+++ b/mysql-test/suite/perfschema/r/dml_cond_instances.result
@@ -13,8 +13,8 @@ ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'cond_in
delete from performance_schema.cond_instances;
ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'cond_instances'
LOCK TABLES performance_schema.cond_instances READ;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'cond_instances'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'cond_instances'
UNLOCK TABLES;
LOCK TABLES performance_schema.cond_instances WRITE;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'cond_instances'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'cond_instances'
UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/r/dml_events_waits_current.result b/mysql-test/suite/perfschema/r/dml_events_waits_current.result
index 122cfcce4a1..66ff929e79b 100644
--- a/mysql-test/suite/perfschema/r/dml_events_waits_current.result
+++ b/mysql-test/suite/perfschema/r/dml_events_waits_current.result
@@ -18,8 +18,8 @@ ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'events_
delete from performance_schema.events_waits_current;
ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'events_waits_current'
LOCK TABLES performance_schema.events_waits_current READ;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'events_waits_current'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'events_waits_current'
UNLOCK TABLES;
LOCK TABLES performance_schema.events_waits_current WRITE;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'events_waits_current'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'events_waits_current'
UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/r/dml_events_waits_history.result b/mysql-test/suite/perfschema/r/dml_events_waits_history.result
index 199ccc1cfa5..ba883c9552a 100644
--- a/mysql-test/suite/perfschema/r/dml_events_waits_history.result
+++ b/mysql-test/suite/perfschema/r/dml_events_waits_history.result
@@ -22,8 +22,8 @@ ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'events_
delete from performance_schema.events_waits_history;
ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'events_waits_history'
LOCK TABLES performance_schema.events_waits_history READ;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'events_waits_history'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'events_waits_history'
UNLOCK TABLES;
LOCK TABLES performance_schema.events_waits_history WRITE;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'events_waits_history'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'events_waits_history'
UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/r/dml_events_waits_history_long.result b/mysql-test/suite/perfschema/r/dml_events_waits_history_long.result
index 773dcd3b1dc..408368ca77c 100644
--- a/mysql-test/suite/perfschema/r/dml_events_waits_history_long.result
+++ b/mysql-test/suite/perfschema/r/dml_events_waits_history_long.result
@@ -22,8 +22,8 @@ ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'events_
delete from performance_schema.events_waits_history_long;
ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'events_waits_history_long'
LOCK TABLES performance_schema.events_waits_history_long READ;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'events_waits_history_long'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'events_waits_history_long'
UNLOCK TABLES;
LOCK TABLES performance_schema.events_waits_history_long WRITE;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'events_waits_history_long'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'events_waits_history_long'
UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/r/dml_ews_by_instance.result b/mysql-test/suite/perfschema/r/dml_ews_by_instance.result
index 6ba37025d3b..0c95c61fd08 100644
--- a/mysql-test/suite/perfschema/r/dml_ews_by_instance.result
+++ b/mysql-test/suite/perfschema/r/dml_ews_by_instance.result
@@ -27,8 +27,8 @@ ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'events_
delete from performance_schema.events_waits_summary_by_instance;
ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'events_waits_summary_by_instance'
LOCK TABLES performance_schema.events_waits_summary_by_instance READ;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'events_waits_summary_by_instance'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'events_waits_summary_by_instance'
UNLOCK TABLES;
LOCK TABLES performance_schema.events_waits_summary_by_instance WRITE;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'events_waits_summary_by_instance'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'events_waits_summary_by_instance'
UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/r/dml_ews_by_thread_by_event_name.result b/mysql-test/suite/perfschema/r/dml_ews_by_thread_by_event_name.result
index a98acb5f536..27f8b5644fd 100644
--- a/mysql-test/suite/perfschema/r/dml_ews_by_thread_by_event_name.result
+++ b/mysql-test/suite/perfschema/r/dml_ews_by_thread_by_event_name.result
@@ -19,8 +19,8 @@ ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'events_
delete from performance_schema.events_waits_summary_by_thread_by_event_name;
ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'events_waits_summary_by_thread_by_event_name'
LOCK TABLES performance_schema.events_waits_summary_by_thread_by_event_name READ;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'events_waits_summary_by_thread_by_event_name'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'events_waits_summary_by_thread_by_event_name'
UNLOCK TABLES;
LOCK TABLES performance_schema.events_waits_summary_by_thread_by_event_name WRITE;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'events_waits_summary_by_thread_by_event_name'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'events_waits_summary_by_thread_by_event_name'
UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/r/dml_ews_global_by_event_name.result b/mysql-test/suite/perfschema/r/dml_ews_global_by_event_name.result
index 159adbd8022..6ddf1841a4e 100644
--- a/mysql-test/suite/perfschema/r/dml_ews_global_by_event_name.result
+++ b/mysql-test/suite/perfschema/r/dml_ews_global_by_event_name.result
@@ -18,8 +18,8 @@ ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'events_
delete from performance_schema.events_waits_summary_global_by_event_name;
ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'events_waits_summary_global_by_event_name'
LOCK TABLES performance_schema.events_waits_summary_global_by_event_name READ;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'events_waits_summary_global_by_event_name'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'events_waits_summary_global_by_event_name'
UNLOCK TABLES;
LOCK TABLES performance_schema.events_waits_summary_global_by_event_name WRITE;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'events_waits_summary_global_by_event_name'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'events_waits_summary_global_by_event_name'
UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/r/dml_file_instances.result b/mysql-test/suite/perfschema/r/dml_file_instances.result
index 4a8a7ac4d47..52204160a4b 100644
--- a/mysql-test/suite/perfschema/r/dml_file_instances.result
+++ b/mysql-test/suite/perfschema/r/dml_file_instances.result
@@ -13,8 +13,8 @@ ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'file_in
delete from performance_schema.file_instances;
ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'file_instances'
LOCK TABLES performance_schema.file_instances READ;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'file_instances'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'file_instances'
UNLOCK TABLES;
LOCK TABLES performance_schema.file_instances WRITE;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'file_instances'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'file_instances'
UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/r/dml_file_summary_by_event_name.result b/mysql-test/suite/perfschema/r/dml_file_summary_by_event_name.result
index a8a9fe852bb..be1b744f786 100644
--- a/mysql-test/suite/perfschema/r/dml_file_summary_by_event_name.result
+++ b/mysql-test/suite/perfschema/r/dml_file_summary_by_event_name.result
@@ -18,8 +18,8 @@ ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'file_su
delete from performance_schema.file_summary_by_event_name;
ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'file_summary_by_event_name'
LOCK TABLES performance_schema.file_summary_by_event_name READ;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'file_summary_by_event_name'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'file_summary_by_event_name'
UNLOCK TABLES;
LOCK TABLES performance_schema.file_summary_by_event_name WRITE;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'file_summary_by_event_name'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'file_summary_by_event_name'
UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/r/dml_file_summary_by_instance.result b/mysql-test/suite/perfschema/r/dml_file_summary_by_instance.result
index 456d6e31173..eabd9170cbc 100644
--- a/mysql-test/suite/perfschema/r/dml_file_summary_by_instance.result
+++ b/mysql-test/suite/perfschema/r/dml_file_summary_by_instance.result
@@ -18,8 +18,8 @@ ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'file_su
delete from performance_schema.file_summary_by_instance;
ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'file_summary_by_instance'
LOCK TABLES performance_schema.file_summary_by_instance READ;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'file_summary_by_instance'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'file_summary_by_instance'
UNLOCK TABLES;
LOCK TABLES performance_schema.file_summary_by_instance WRITE;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'file_summary_by_instance'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'file_summary_by_instance'
UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/r/dml_mutex_instances.result b/mysql-test/suite/perfschema/r/dml_mutex_instances.result
index 665517c7227..0089e20b3b6 100644
--- a/mysql-test/suite/perfschema/r/dml_mutex_instances.result
+++ b/mysql-test/suite/perfschema/r/dml_mutex_instances.result
@@ -13,8 +13,8 @@ ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'mutex_i
delete from performance_schema.mutex_instances;
ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'mutex_instances'
LOCK TABLES performance_schema.mutex_instances READ;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'mutex_instances'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'mutex_instances'
UNLOCK TABLES;
LOCK TABLES performance_schema.mutex_instances WRITE;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'mutex_instances'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'mutex_instances'
UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/r/dml_performance_timers.result b/mysql-test/suite/perfschema/r/dml_performance_timers.result
index f345cce921e..99d0b9fdf60 100644
--- a/mysql-test/suite/perfschema/r/dml_performance_timers.result
+++ b/mysql-test/suite/perfschema/r/dml_performance_timers.result
@@ -22,8 +22,8 @@ delete from performance_schema.performance_timers
where timer_name='CYCLE';
ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'performance_timers'
LOCK TABLES performance_schema.performance_timers READ;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'performance_timers'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'performance_timers'
UNLOCK TABLES;
LOCK TABLES performance_schema.performance_timers WRITE;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'performance_timers'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'performance_timers'
UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/r/dml_rwlock_instances.result b/mysql-test/suite/perfschema/r/dml_rwlock_instances.result
index b072eea3955..117c4e47508 100644
--- a/mysql-test/suite/perfschema/r/dml_rwlock_instances.result
+++ b/mysql-test/suite/perfschema/r/dml_rwlock_instances.result
@@ -13,8 +13,8 @@ ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'rwlock_
delete from performance_schema.rwlock_instances;
ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'rwlock_instances'
LOCK TABLES performance_schema.rwlock_instances READ;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'rwlock_instances'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'rwlock_instances'
UNLOCK TABLES;
LOCK TABLES performance_schema.rwlock_instances WRITE;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'rwlock_instances'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'rwlock_instances'
UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/r/dml_threads.result b/mysql-test/suite/perfschema/r/dml_threads.result
index b78d1934d1f..c5a753e7f14 100644
--- a/mysql-test/suite/perfschema/r/dml_threads.result
+++ b/mysql-test/suite/perfschema/r/dml_threads.result
@@ -17,8 +17,8 @@ ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'threads
delete from performance_schema.threads;
ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'threads'
LOCK TABLES performance_schema.threads READ;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'threads'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'threads'
UNLOCK TABLES;
LOCK TABLES performance_schema.threads WRITE;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'threads'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'threads'
UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/r/misc.result b/mysql-test/suite/perfschema/r/misc.result
index 2f66f80ed75..1dbde982328 100644
--- a/mysql-test/suite/perfschema/r/misc.result
+++ b/mysql-test/suite/perfschema/r/misc.result
@@ -6,9 +6,9 @@ AND EVENT_NAME IN
WHERE NAME LIKE "wait/synch/%")
LIMIT 1;
create table test.t1(a int) engine=performance_schema;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
create table test.t1 like performance_schema.events_waits_current;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
create table performance_schema.t1(a int);
ERROR 42000: CREATE command denied to user 'root'@'localhost' for table 't1'
drop table if exists test.ghost;
diff --git a/mysql-test/suite/perfschema/r/myisam_file_io.result b/mysql-test/suite/perfschema/r/myisam_file_io.result
index 5cdcf6ac789..eb8b0d6f9f5 100644
--- a/mysql-test/suite/perfschema/r/myisam_file_io.result
+++ b/mysql-test/suite/perfschema/r/myisam_file_io.result
@@ -17,6 +17,7 @@ substring(object_name, locate("no_index_tab", object_name)) as short_name
from performance_schema.events_waits_history_long
where operation not like "tell"
and event_name like "wait/io/file/myisam/%"
+ having short_name <> ""
order by thread_id, event_id;
event_name short_source operation number_of_bytes short_name
wait/io/file/myisam/kfile mi_create.c: create NULL no_index_tab.MYI
diff --git a/mysql-test/suite/perfschema/r/privilege.result b/mysql-test/suite/perfschema/r/privilege.result
index d1831c9c4be..019d8964523 100644
--- a/mysql-test/suite/perfschema/r/privilege.result
+++ b/mysql-test/suite/perfschema/r/privilege.result
@@ -152,13 +152,13 @@ before insert on performance_schema.file_instances
for each row begin end;
ERROR 42000: Access denied for user 'root'@'localhost' to database 'performance_schema'
create table test.t1(a int) engine=PERFORMANCE_SCHEMA;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
create table test.t1 like performance_schema.setup_instruments;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
create table test.t1 like performance_schema.events_waits_current;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
create table test.t1 like performance_schema.file_instances;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
insert into performance_schema.setup_instruments
set name="foo";
ERROR 42000: INSERT command denied to user 'root'@'localhost' for table 'setup_instruments'
@@ -179,16 +179,16 @@ unlock tables;
lock table performance_schema.setup_instruments write;
unlock tables;
lock table performance_schema.events_waits_current read;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'events_waits_current'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'events_waits_current'
unlock tables;
lock table performance_schema.events_waits_current write;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'events_waits_current'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'events_waits_current'
unlock tables;
lock table performance_schema.file_instances read;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'file_instances'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'file_instances'
unlock tables;
lock table performance_schema.file_instances write;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'root'@'localhost' for table 'file_instances'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'root'@'localhost' for table 'file_instances'
unlock tables;
#
# WL#4818, NFS2: Can use grants to give normal user access
@@ -250,13 +250,13 @@ before insert on performance_schema.file_instances
for each row begin end;
ERROR 42000: Access denied for user 'pfs_user_1'@'localhost' to database 'performance_schema'
create table test.t1(a int) engine=PERFORMANCE_SCHEMA;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
create table test.t1 like performance_schema.setup_instruments;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
create table test.t1 like performance_schema.events_waits_current;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
create table test.t1 like performance_schema.file_instances;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
insert into performance_schema.setup_instruments
set name="foo";
ERROR 42000: INSERT command denied to user 'pfs_user_1'@'localhost' for table 'setup_instruments'
@@ -277,16 +277,16 @@ unlock tables;
lock table performance_schema.setup_instruments write;
unlock tables;
lock table performance_schema.events_waits_current read;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'pfs_user_1'@'localhost' for table 'events_waits_current'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'pfs_user_1'@'localhost' for table 'events_waits_current'
unlock tables;
lock table performance_schema.events_waits_current write;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'pfs_user_1'@'localhost' for table 'events_waits_current'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'pfs_user_1'@'localhost' for table 'events_waits_current'
unlock tables;
lock table performance_schema.file_instances read;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'pfs_user_1'@'localhost' for table 'file_instances'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'pfs_user_1'@'localhost' for table 'file_instances'
unlock tables;
lock table performance_schema.file_instances write;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'pfs_user_1'@'localhost' for table 'file_instances'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'pfs_user_1'@'localhost' for table 'file_instances'
unlock tables;
#
# WL#4818, NFS2: Can use grants to give normal user access
@@ -348,13 +348,13 @@ before insert on performance_schema.file_instances
for each row begin end;
ERROR 42000: Access denied for user 'pfs_user_2'@'localhost' to database 'performance_schema'
create table test.t1(a int) engine=PERFORMANCE_SCHEMA;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
create table test.t1 like performance_schema.setup_instruments;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
create table test.t1 like performance_schema.events_waits_current;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
create table test.t1 like performance_schema.file_instances;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
insert into performance_schema.setup_instruments
set name="foo";
ERROR 42000: INSERT command denied to user 'pfs_user_2'@'localhost' for table 'setup_instruments'
@@ -375,16 +375,16 @@ unlock tables;
lock table performance_schema.setup_instruments write;
unlock tables;
lock table performance_schema.events_waits_current read;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'pfs_user_2'@'localhost' for table 'events_waits_current'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'pfs_user_2'@'localhost' for table 'events_waits_current'
unlock tables;
lock table performance_schema.events_waits_current write;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'pfs_user_2'@'localhost' for table 'events_waits_current'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'pfs_user_2'@'localhost' for table 'events_waits_current'
unlock tables;
lock table performance_schema.file_instances read;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'pfs_user_2'@'localhost' for table 'file_instances'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'pfs_user_2'@'localhost' for table 'file_instances'
unlock tables;
lock table performance_schema.file_instances write;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'pfs_user_2'@'localhost' for table 'file_instances'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'pfs_user_2'@'localhost' for table 'file_instances'
unlock tables;
#
# WL#4818, NFS2: Can use grants to give normal user access
@@ -446,13 +446,13 @@ before insert on performance_schema.file_instances
for each row begin end;
ERROR 42000: Access denied for user 'pfs_user_3'@'localhost' to database 'performance_schema'
create table test.t1(a int) engine=PERFORMANCE_SCHEMA;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
create table test.t1 like performance_schema.setup_instruments;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
create table test.t1 like performance_schema.events_waits_current;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
create table test.t1 like performance_schema.file_instances;
-ERROR HY000: Can't create table 'test.t1' (errno: 131)
+ERROR HY000: Can't create table 'test.t1' (errno: 131 "Command not supported by database")
insert into performance_schema.setup_instruments
set name="foo";
ERROR 42000: INSERT command denied to user 'pfs_user_3'@'localhost' for table 'setup_instruments'
@@ -473,16 +473,16 @@ unlock tables;
lock table performance_schema.setup_instruments write;
unlock tables;
lock table performance_schema.events_waits_current read;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'pfs_user_3'@'localhost' for table 'events_waits_current'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'pfs_user_3'@'localhost' for table 'events_waits_current'
unlock tables;
lock table performance_schema.events_waits_current write;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'pfs_user_3'@'localhost' for table 'events_waits_current'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'pfs_user_3'@'localhost' for table 'events_waits_current'
unlock tables;
lock table performance_schema.file_instances read;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'pfs_user_3'@'localhost' for table 'file_instances'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'pfs_user_3'@'localhost' for table 'file_instances'
unlock tables;
lock table performance_schema.file_instances write;
-ERROR 42000: SELECT,LOCK TABL command denied to user 'pfs_user_3'@'localhost' for table 'file_instances'
+ERROR 42000: SELECT,LOCK TABLES command denied to user 'pfs_user_3'@'localhost' for table 'file_instances'
unlock tables;
#
# WL#4818, NFS2: Can use grants to give normal user access
diff --git a/mysql-test/suite/perfschema/r/relaylog.result b/mysql-test/suite/perfschema/r/relaylog.result
index a1d10265f4d..7de32ef07d0 100644
--- a/mysql-test/suite/perfschema/r/relaylog.result
+++ b/mysql-test/suite/perfschema/r/relaylog.result
@@ -56,10 +56,13 @@ where event_name like "%MYSQL_BIN_LOG%"
and event_name not like "%MYSQL_BIN_LOG::update_cond"
order by event_name;
EVENT_NAME COUNT_STAR
-wait/synch/cond/sql/MYSQL_BIN_LOG::COND_prep_xids NONE
+wait/synch/cond/sql/MYSQL_BIN_LOG::COND_binlog_background_thread NONE
+wait/synch/cond/sql/MYSQL_BIN_LOG::COND_binlog_background_thread_end NONE
wait/synch/cond/sql/MYSQL_BIN_LOG::COND_queue_busy NONE
+wait/synch/cond/sql/MYSQL_BIN_LOG::COND_xid_list NONE
+wait/synch/mutex/sql/MYSQL_BIN_LOG::LOCK_binlog_background_thread MANY
wait/synch/mutex/sql/MYSQL_BIN_LOG::LOCK_index MANY
-wait/synch/mutex/sql/MYSQL_BIN_LOG::LOCK_prep_xids NONE
+wait/synch/mutex/sql/MYSQL_BIN_LOG::LOCK_xid_list MANY
"Expect no slave relay log"
select * from performance_schema.file_summary_by_instance
where event_name like "%relaylog%" order by file_name;
@@ -131,10 +134,13 @@ where event_name like "%MYSQL_BIN_LOG%"
and event_name not like "%MYSQL_BIN_LOG::update_cond"
order by event_name;
EVENT_NAME COUNT_STAR
-wait/synch/cond/sql/MYSQL_BIN_LOG::COND_prep_xids NONE
+wait/synch/cond/sql/MYSQL_BIN_LOG::COND_binlog_background_thread MANY
+wait/synch/cond/sql/MYSQL_BIN_LOG::COND_binlog_background_thread_end NONE
wait/synch/cond/sql/MYSQL_BIN_LOG::COND_queue_busy NONE
+wait/synch/cond/sql/MYSQL_BIN_LOG::COND_xid_list MANY
+wait/synch/mutex/sql/MYSQL_BIN_LOG::LOCK_binlog_background_thread MANY
wait/synch/mutex/sql/MYSQL_BIN_LOG::LOCK_index MANY
-wait/synch/mutex/sql/MYSQL_BIN_LOG::LOCK_prep_xids NONE
+wait/synch/mutex/sql/MYSQL_BIN_LOG::LOCK_xid_list MANY
"Expect a slave relay log"
select
substring(file_name, locate("slave-", file_name)) as FILE_NAME,
diff --git a/mysql-test/suite/perfschema/t/myisam_file_io.test b/mysql-test/suite/perfschema/t/myisam_file_io.test
index 5888c29fe22..d93edd7d3b6 100644
--- a/mysql-test/suite/perfschema/t/myisam_file_io.test
+++ b/mysql-test/suite/perfschema/t/myisam_file_io.test
@@ -46,6 +46,7 @@ select event_name,
from performance_schema.events_waits_history_long
where operation not like "tell"
and event_name like "wait/io/file/myisam/%"
+ having short_name <> ""
order by thread_id, event_id;
# In case of failures, this will tell if file io are lost.
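The added HAVING clause works because LOCATE returns 0 when "no_index_tab" does not occur in object_name, and SUBSTRING with start position 0 yields the empty string; rows for unrelated files therefore compute short_name = '' and are filtered out. A minimal illustration:

select locate("no_index_tab", "/tmp/unrelated.MYD");     # 0: needle absent
select substring("/tmp/unrelated.MYD", 0) as short_name; # '': empty string
# hence: having short_name <> "" keeps only the no_index_tab rows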
diff --git a/mysql-test/suite/rpl/r/rpl_EE_err.result b/mysql-test/suite/rpl/r/rpl_EE_err.result
index 8220f4e0c8d..5b3f1872c4e 100644
--- a/mysql-test/suite/rpl/r/rpl_EE_err.result
+++ b/mysql-test/suite/rpl/r/rpl_EE_err.result
@@ -4,5 +4,5 @@ create table t1 (a int) engine=myisam;
flush tables;
drop table if exists t1;
Warnings:
-Warning 2 Can't find file: 't1' (errno: 2)
+Warning 2 Can't find file: 't1' (errno: 2 "No such file or directory")
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_binlog_errors.result b/mysql-test/suite/rpl/r/rpl_binlog_errors.result
index ddafbca0672..8315e0bb8f4 100644
--- a/mysql-test/suite/rpl/r/rpl_binlog_errors.result
+++ b/mysql-test/suite/rpl/r/rpl_binlog_errors.result
@@ -176,7 +176,7 @@ SHOW WARNINGS;
Level Code Message
SET GLOBAL debug_dbug="+d,fault_injection_registering_index";
FLUSH LOGS;
-ERROR HY000: Can't open file: 'master-bin.000002' (errno: 1)
+ERROR HY000: Can't open file: 'master-bin.000002' (errno: 1 "Operation not permitted")
SET GLOBAL debug_dbug="-d,fault_injection_registering_index";
SHOW BINARY LOGS;
ERROR HY000: You are not using binary logging
@@ -190,7 +190,7 @@ DROP TABLE t5;
include/rpl_restart_server.inc [server_number=1]
SET GLOBAL debug_dbug="+d,fault_injection_openning_index";
FLUSH LOGS;
-ERROR HY000: Can't open file: 'master-bin.index' (errno: 1)
+ERROR HY000: Can't open file: 'master-bin.index' (errno: 1 "Operation not permitted")
SET GLOBAL debug_dbug="-d,fault_injection_openning_index";
RESET MASTER;
ERROR HY000: Binlog closed, cannot RESET MASTER
@@ -204,7 +204,7 @@ include/rpl_restart_server.inc [server_number=1]
###################### TEST #12
SET GLOBAL debug_dbug="+d,fault_injection_new_file_rotate_event";
FLUSH LOGS;
-ERROR HY000: Can't open file: 'master-bin' (errno: 2)
+ERROR HY000: Can't open file: 'master-bin' (errno: 2 "No such file or directory")
SET GLOBAL debug_dbug="-d,fault_injection_new_file_rotate_event";
RESET MASTER;
ERROR HY000: Binlog closed, cannot RESET MASTER
diff --git a/mysql-test/suite/rpl/r/rpl_checksum.result b/mysql-test/suite/rpl/r/rpl_checksum.result
index ba5cb1b1b2b..fb61f159c80 100644
--- a/mysql-test/suite/rpl/r/rpl_checksum.result
+++ b/mysql-test/suite/rpl/r/rpl_checksum.result
@@ -71,7 +71,7 @@ insert into t1 values (1) /* will not be applied on slave due to simulation */;
set @@global.debug_dbug='d,simulate_slave_unaware_checksum';
start slave;
include/wait_for_slave_io_error.inc [errno=1236]
-Last_IO_Error = 'Got fatal error 1236 from master when reading data from binary log: 'Slave can not handle replication events with the checksum that master is configured to log; the first event 'master-bin.000009' at 245, the last event read from 'master-bin.000010' at 245, the last byte read from 'master-bin.000010' at 245.''
+Last_IO_Error = 'Got fatal error 1236 from master when reading data from binary log: 'Slave can not handle replication events with the checksum that master is configured to log; the first event 'master-bin.000009' at 326, the last event read from 'master-bin.000010' at 246, the last byte read from 'master-bin.000010' at 246.''
select count(*) as zero from t1;
zero
0
diff --git a/mysql-test/suite/rpl/r/rpl_conditional_comments.result b/mysql-test/suite/rpl/r/rpl_conditional_comments.result
index 5471f788561..2ff418e068d 100644
--- a/mysql-test/suite/rpl/r/rpl_conditional_comments.result
+++ b/mysql-test/suite/rpl/r/rpl_conditional_comments.result
@@ -9,11 +9,11 @@ master-bin.000001 # Query # # use `test`; CREATE TABLE t1(c1 INT)
# ------------------------------------------------------------------
# In a statement, some CCs are applied while others are not. The CCs
# which are not applied on master will be binlogged as common comments.
-/*!99999 --- */INSERT /*!INTO*/ /*!10000 t1 */ VALUES(10) /*!99999 ,(11)*/;
+/*!999999 --- */INSERT /*!INTO*/ /*!10000 t1 */ VALUES(10) /*!999999 ,(11)*/;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
-master-bin.000001 # Query # # use `test`; /* 99999 --- */INSERT /*!INTO*/ /*!10000 t1 */ VALUES(10) /* 99999 ,(11)*/
+master-bin.000001 # Query # # use `test`; /* 999999 --- */INSERT /*!INTO*/ /*!10000 t1 */ VALUES(10) /* 999999 ,(11)*/
master-bin.000001 # Query # # COMMIT
include/diff_tables.inc [master:t1,slave:t1]
@@ -21,7 +21,7 @@ include/diff_tables.inc [master:t1,slave:t1]
# -----------------------------------------------------------------
# Verify whether it can be binlogged correctly when executing prepared
# statement.
-PREPARE stmt FROM 'INSERT INTO /*!99999 blabla*/ t1 VALUES(60) /*!99999 ,(61)*/';
+PREPARE stmt FROM 'INSERT INTO /*!999999 blabla*/ t1 VALUES(60) /*!999999 ,(61)*/';
EXECUTE stmt;
DROP TABLE t1;
CREATE TABLE t1(c1 INT);
@@ -29,7 +29,7 @@ EXECUTE stmt;
include/diff_tables.inc [master:t1,slave:t1]
SET @value=62;
-PREPARE stmt FROM 'INSERT INTO /*!99999 blabla */ t1 VALUES(?) /*!99999 ,(63)*/';
+PREPARE stmt FROM 'INSERT INTO /*!999999 blabla */ t1 VALUES(?) /*!999999 ,(63)*/';
EXECUTE stmt USING @value;
DROP TABLE t1;
CREATE TABLE t1(c1 INT);
@@ -37,20 +37,20 @@ EXECUTE stmt USING @value;
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Query # # BEGIN
-master-bin.000001 # Query # # use `test`; INSERT INTO /* 99999 blabla*/ t1 VALUES(60) /* 99999 ,(61)*/
+master-bin.000001 # Query # # use `test`; INSERT INTO /* 999999 blabla*/ t1 VALUES(60) /* 999999 ,(61)*/
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
master-bin.000001 # Query # # use `test`; CREATE TABLE t1(c1 INT)
master-bin.000001 # Query # # BEGIN
-master-bin.000001 # Query # # use `test`; INSERT INTO /* 99999 blabla*/ t1 VALUES(60) /* 99999 ,(61)*/
+master-bin.000001 # Query # # use `test`; INSERT INTO /* 999999 blabla*/ t1 VALUES(60) /* 999999 ,(61)*/
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # BEGIN
-master-bin.000001 # Query # # use `test`; INSERT INTO /* 99999 blabla */ t1 VALUES(62) /* 99999 ,(63)*/
+master-bin.000001 # Query # # use `test`; INSERT INTO /* 999999 blabla */ t1 VALUES(62) /* 999999 ,(63)*/
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
master-bin.000001 # Query # # use `test`; CREATE TABLE t1(c1 INT)
master-bin.000001 # Query # # BEGIN
-master-bin.000001 # Query # # use `test`; INSERT INTO /* 99999 blabla */ t1 VALUES(62) /* 99999 ,(63)*/
+master-bin.000001 # Query # # use `test`; INSERT INTO /* 999999 blabla */ t1 VALUES(62) /* 999999 ,(63)*/
master-bin.000001 # Query # # COMMIT
include/diff_tables.inc [master:t1,slave:t1]
@@ -58,7 +58,7 @@ include/diff_tables.inc [master:t1,slave:t1]
# -----------------------------------------------------------------
# Verify that it can restore the '!' if it is an incomplete conditional
# comment
-SELECT c1 FROM /*!99999 t1 WHEREN;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '/*!99999 t1 WHEREN' at line 1
+SELECT c1 FROM /*!999999 t1 WHEREN;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '/*!999999 t1 WHEREN' at line 1
DROP TABLE t1;
include/rpl_end.inc
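The bump from /*!99999 to /*!999999 follows from how version-conditional comments work: the digits encode a minimum server version, and the comment body executes only on servers of at least that version. MariaDB 10.0 reports six-digit version numbers such as 100005, which already exceed 99999, so the old five-digit sentinel stopped meaning "never execute"; 999999 (version 99.99.99) restores that guarantee. A quick illustration:

select 1 /*!50000 + 1 */;  # any server >= 5.0.0 evaluates 1 + 1
select 1 /*!999999 + 1 */; # body ignored: no release reaches version 99.99.99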
diff --git a/mysql-test/suite/rpl/r/rpl_deadlock_innodb.result b/mysql-test/suite/rpl/r/rpl_deadlock_innodb.result
index 37d209bbcf6..f2128f8d855 100644
--- a/mysql-test/suite/rpl/r/rpl_deadlock_innodb.result
+++ b/mysql-test/suite/rpl/r/rpl_deadlock_innodb.result
@@ -76,6 +76,8 @@ include/check_slave_is_running.inc
*** Test lock wait timeout and purged relay logs ***
SET @my_max_relay_log_size= @@global.max_relay_log_size;
SET global max_relay_log_size=0;
+Warnings:
+Warning 1292 Truncated incorrect max_relay_log_size value: '0'
include/stop_slave.inc
DELETE FROM t2;
CHANGE MASTER TO MASTER_LOG_POS=<master_pos_begin>;
diff --git a/mysql-test/suite/rpl/r/rpl_drop_db.result b/mysql-test/suite/rpl/r/rpl_drop_db.result
index 8a88f01a444..dda578a419e 100644
--- a/mysql-test/suite/rpl/r/rpl_drop_db.result
+++ b/mysql-test/suite/rpl/r/rpl_drop_db.result
@@ -8,12 +8,12 @@ select * from mysqltest1.t1 into outfile 'mysqltest1/f1.txt';
create table mysqltest1.t2 (n int);
create table mysqltest1.t3 (n int);
drop database mysqltest1;
-ERROR HY000: Error dropping database (can't rmdir './mysqltest1/', errno: 17)
+ERROR HY000: Error dropping database (can't rmdir './mysqltest1/', errno: 17 "File exists")
use mysqltest1;
show tables;
Tables_in_mysqltest1
drop database mysqltest1;
-ERROR HY000: Error dropping database (can't rmdir './mysqltest1/', errno: 17)
+ERROR HY000: Error dropping database (can't rmdir './mysqltest1/', errno: 17 "File exists")
use mysqltest1;
show tables;
Tables_in_mysqltest1
diff --git a/mysql-test/suite/rpl/r/rpl_filter_dbs_dynamic.result b/mysql-test/suite/rpl/r/rpl_filter_dbs_dynamic.result
index 321b8d912e6..eaa2ed9a61d 100644
--- a/mysql-test/suite/rpl/r/rpl_filter_dbs_dynamic.result
+++ b/mysql-test/suite/rpl/r/rpl_filter_dbs_dynamic.result
@@ -1,9 +1,9 @@
include/master-slave.inc
[connection master]
SET @@GLOBAL.replicate_do_db="db1";
-ERROR HY000: This operation cannot be performed with a running slave; run STOP SLAVE first
+ERROR HY000: This operation cannot be performed as you have a running slave ''; run STOP SLAVE '' first
SET @@GLOBAL.replicate_ignore_db="db2";
-ERROR HY000: This operation cannot be performed with a running slave; run STOP SLAVE first
+ERROR HY000: This operation cannot be performed as you have a running slave ''; run STOP SLAVE '' first
include/stop_slave.inc
SET @@GLOBAL.replicate_do_db="db1";
SET @@GLOBAL.replicate_ignore_db="db2";
diff --git a/mysql-test/suite/rpl/r/rpl_filter_tables_dynamic.result b/mysql-test/suite/rpl/r/rpl_filter_tables_dynamic.result
index 9eb803d17ea..3d03d36828a 100644
--- a/mysql-test/suite/rpl/r/rpl_filter_tables_dynamic.result
+++ b/mysql-test/suite/rpl/r/rpl_filter_tables_dynamic.result
@@ -1,9 +1,9 @@
include/master-slave.inc
[connection master]
SET @@GLOBAL.replicate_do_table="test.t1,test.t2,test.t3";
-ERROR HY000: This operation cannot be performed with a running slave; run STOP SLAVE first
+ERROR HY000: This operation cannot be performed as you have a running slave ''; run STOP SLAVE '' first
SET @@GLOBAL.replicate_ignore_table="test.t4,test.t5,test.t6";
-ERROR HY000: This operation cannot be performed with a running slave; run STOP SLAVE first
+ERROR HY000: This operation cannot be performed as you have a running slave ''; run STOP SLAVE '' first
include/stop_slave.inc
SET @@GLOBAL.replicate_do_table="test.t1,test.t2,test.t3";
SET @@GLOBAL.replicate_ignore_table="test.t4,test.t5,test.t6";
diff --git a/mysql-test/suite/rpl/r/rpl_filter_wild_tables_dynamic.result b/mysql-test/suite/rpl/r/rpl_filter_wild_tables_dynamic.result
index 6858181234e..19d8e513e6f 100644
--- a/mysql-test/suite/rpl/r/rpl_filter_wild_tables_dynamic.result
+++ b/mysql-test/suite/rpl/r/rpl_filter_wild_tables_dynamic.result
@@ -1,9 +1,9 @@
include/master-slave.inc
[connection master]
SET @@GLOBAL.replicate_wild_do_table="test.a%";
-ERROR HY000: This operation cannot be performed with a running slave; run STOP SLAVE first
+ERROR HY000: This operation cannot be performed as you have a running slave ''; run STOP SLAVE '' first
SET @@GLOBAL.replicate_wild_ignore_table="test.b%";
-ERROR HY000: This operation cannot be performed with a running slave; run STOP SLAVE first
+ERROR HY000: This operation cannot be performed as you have a running slave ''; run STOP SLAVE '' first
include/stop_slave.inc
SET @@GLOBAL.replicate_wild_do_table="test.a%";
SET @@GLOBAL.replicate_wild_ignore_table="test.b%";
diff --git a/mysql-test/suite/rpl/r/rpl_flush_logs.result b/mysql-test/suite/rpl/r/rpl_flush_logs.result
index 4fce91a7234..1f543cd9f43 100644
--- a/mysql-test/suite/rpl/r/rpl_flush_logs.result
+++ b/mysql-test/suite/rpl/r/rpl_flush_logs.result
@@ -8,7 +8,7 @@ flush error logs;
# after executing the 'flush error logs' statement.
# Test whether the 'flush relay logs' statement is supported.
flush relay logs;
-# Check the 'slave-relay-bin.000003' file is created
+# Check the 'slave-relay-bin.000003' file is not created
# after executing the 'flush relay logs' statement.
# Make sure binary logs were not flushed
# after executing the 'flush relay logs' statement.
@@ -30,21 +30,21 @@ flush engine logs;
flush binary logs;
# Check the 'master-bin.000002' file is created
# after executing the 'flush binary logs' statement.
-# Make sure the 'slave-relay-bin.000006' file does not exist
+# Make sure the 'slave-relay-bin.000005' file does not exist
# before executing the 'flush error logs, relay logs' statement.
# Test whether all kinds of logs can be combined in one statement.
flush error logs, relay logs;
# Make sure binary logs were not flushed
# after executing the 'flush error logs, relay logs' statement.
-# Check the 'slave-relay-bin.000006' file is created after
+# Check the 'slave-relay-bin.000004' file is created after
# executing the 'flush error logs, relay logs' statement.
-# Make sure the 'slave-relay-bin.000007' and 'slave-relay-bin.000008'
+# Make sure the 'slave-relay-bin.000005' and 'slave-relay-bin.000006'
# files do not exist before executing the 'flush error logs, relay logs'
# statement.
# Test that the 'flush logs' statement works and flushes all the logs.
flush logs;
# Check 'master-bin.000003' is created
# after executing the 'flush logs' statement.
-# Check the 'slave-relay-bin.000007' and 'slave-relay-bin.000008'
+# Check the 'slave-relay-bin.000005' and 'slave-relay-bin.000006'
# files are created after executing the 'flush logs' statement.
include/rpl_end.inc
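For reference, the flush variants whose output this result tracks, with the semantics the comments above assert ('flush relay logs' rotates relay logs without touching binary logs, and log kinds can be combined in one statement):

flush relay logs;             # rotate relay logs only
flush error logs, relay logs; # several log kinds in one statement
flush logs;                   # flush all log types, binary logs included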
diff --git a/mysql-test/suite/rpl/r/rpl_function_defaults.result b/mysql-test/suite/rpl/r/rpl_function_defaults.result
new file mode 100644
index 00000000000..264bb3c9c6d
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_function_defaults.result
@@ -0,0 +1,132 @@
+#
+# Test of function defaults on replicated tables.
+#
+include/master-slave.inc
+[connection master]
+connection master
+SET TIME_ZONE="+10:30";
+SET TIMESTAMP=123456.789123;
+SELECT CURRENT_TIMESTAMP;
+CURRENT_TIMESTAMP
+1970-01-02 20:47:36
+connection slave
+SET TIME_ZONE="+00:00";
+SET TIMESTAMP=987654321.123456;
+SELECT CURRENT_TIMESTAMP;
+CURRENT_TIMESTAMP
+2001-04-19 04:25:21
+connection master
+CREATE TABLE t1 (
+a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+b TIMESTAMP(1) NOT NULL DEFAULT CURRENT_TIMESTAMP(1),
+c TIMESTAMP(2) NOT NULL DEFAULT CURRENT_TIMESTAMP(2),
+d TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
+e TIMESTAMP(4) NOT NULL DEFAULT CURRENT_TIMESTAMP(4),
+f TIMESTAMP(5) NOT NULL DEFAULT CURRENT_TIMESTAMP(5),
+g TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+h DATETIME DEFAULT CURRENT_TIMESTAMP,
+i DATETIME(1) DEFAULT CURRENT_TIMESTAMP(1),
+j DATETIME(2) DEFAULT CURRENT_TIMESTAMP(2),
+k DATETIME(3) DEFAULT CURRENT_TIMESTAMP(3),
+l DATETIME(4) DEFAULT CURRENT_TIMESTAMP(4),
+m DATETIME(5) DEFAULT CURRENT_TIMESTAMP(5),
+n DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6),
+o INT
+);
+INSERT INTO t1 ( o ) VALUES ( 1 );
+CREATE TABLE t2 (
+a TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+b TIMESTAMP(1) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(1),
+c TIMESTAMP(2) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(2),
+d TIMESTAMP(3) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(3),
+e TIMESTAMP(4) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(4),
+f TIMESTAMP(5) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(5),
+g TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+h DATETIME ON UPDATE CURRENT_TIMESTAMP,
+i DATETIME(1) ON UPDATE CURRENT_TIMESTAMP(1),
+j DATETIME(2) ON UPDATE CURRENT_TIMESTAMP(2),
+k DATETIME(3) ON UPDATE CURRENT_TIMESTAMP(3),
+l DATETIME(4) ON UPDATE CURRENT_TIMESTAMP(4),
+m DATETIME(5) ON UPDATE CURRENT_TIMESTAMP(5),
+n DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6),
+o INT
+);
+INSERT INTO t2 ( o ) VALUES ( 1 );
+sync_slave_with_master
+connection slave
+SELECT * FROM t1;
+a 1970-01-02 10:17:36
+b 1970-01-02 10:17:36.7
+c 1970-01-02 10:17:36.78
+d 1970-01-02 10:17:36.789
+e 1970-01-02 10:17:36.7891
+f 1970-01-02 10:17:36.78912
+g 1970-01-02 10:17:36.789123
+h 1970-01-02 20:47:36
+i 1970-01-02 20:47:36.7
+j 1970-01-02 20:47:36.78
+k 1970-01-02 20:47:36.789
+l 1970-01-02 20:47:36.7891
+m 1970-01-02 20:47:36.78912
+n 1970-01-02 20:47:36.789123
+o 1
+SELECT * FROM t2;
+a 0000-00-00 00:00:00
+b 0000-00-00 00:00:00.0
+c 0000-00-00 00:00:00.00
+d 0000-00-00 00:00:00.000
+e 0000-00-00 00:00:00.0000
+f 0000-00-00 00:00:00.00000
+g 0000-00-00 00:00:00.000000
+h NULL
+i NULL
+j NULL
+k NULL
+l NULL
+m NULL
+n NULL
+o 1
+connection master
+SET TIMESTAMP=1234567890.123456;
+SELECT CURRENT_TIMESTAMP;
+CURRENT_TIMESTAMP
+2009-02-14 10:01:30
+UPDATE t1 SET o = 2;
+UPDATE t2 SET o = 2;
+sync_slave_with_master
+connection slave
+SELECT * FROM t1;
+a 1970-01-02 10:17:36
+b 1970-01-02 10:17:36.7
+c 1970-01-02 10:17:36.78
+d 1970-01-02 10:17:36.789
+e 1970-01-02 10:17:36.7891
+f 1970-01-02 10:17:36.78912
+g 1970-01-02 10:17:36.789123
+h 1970-01-02 20:47:36
+i 1970-01-02 20:47:36.7
+j 1970-01-02 20:47:36.78
+k 1970-01-02 20:47:36.789
+l 1970-01-02 20:47:36.7891
+m 1970-01-02 20:47:36.78912
+n 1970-01-02 20:47:36.789123
+o 2
+SELECT * FROM t2;
+a 2009-02-13 23:31:30
+b 2009-02-13 23:31:30.1
+c 2009-02-13 23:31:30.12
+d 2009-02-13 23:31:30.123
+e 2009-02-13 23:31:30.1234
+f 2009-02-13 23:31:30.12345
+g 2009-02-13 23:31:30.123456
+h 2009-02-14 10:01:30
+i 2009-02-14 10:01:30.1
+j 2009-02-14 10:01:30.12
+k 2009-02-14 10:01:30.123
+l 2009-02-14 10:01:30.1234
+m 2009-02-14 10:01:30.12345
+n 2009-02-14 10:01:30.123456
+o 2
+connection master
+DROP TABLE t1, t2;
+include/rpl_end.inc
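
Note: the split in the expected values above follows from how the two column types store time. TIMESTAMP is stored as UTC and rendered in the reading session's time zone (the slave's +00:00, giving 10:17:36), while DATETIME stores the wall-clock value the master computed under +10:30 (20:47:36). A minimal sketch reproducing the effect on a single server:

    SET TIME_ZONE = "+10:30";
    SET TIMESTAMP = 123456.789123;
    SELECT CURRENT_TIMESTAMP(6);   -- 1970-01-02 20:47:36.789123
    SET TIME_ZONE = "+00:00";
    SELECT CURRENT_TIMESTAMP(6);   -- 1970-01-02 10:17:36.789123
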
diff --git a/mysql-test/suite/rpl/r/rpl_heartbeat.result b/mysql-test/suite/rpl/r/rpl_heartbeat.result
index d4bdb77ef0b..b0f36558d10 100644
--- a/mysql-test/suite/rpl/r/rpl_heartbeat.result
+++ b/mysql-test/suite/rpl/r/rpl_heartbeat.result
@@ -41,8 +41,6 @@ show status like 'Slave_heartbeat_period';;
Variable_name Slave_heartbeat_period
Value 4.000
set @@global.slave_net_timeout= 3 /* must be a warning */;
-Warnings:
-Warning 1704 The requested value for the heartbeat period exceeds the value of `slave_net_timeout' seconds. A sensible value for the period should be less than the timeout.
reset slave;
drop table if exists t1;
set @@global.slave_net_timeout= 10;
diff --git a/mysql-test/suite/rpl/r/rpl_heartbeat_basic.result b/mysql-test/suite/rpl/r/rpl_heartbeat_basic.result
index cc9d1f99f7c..0c274165e1e 100644
--- a/mysql-test/suite/rpl/r/rpl_heartbeat_basic.result
+++ b/mysql-test/suite/rpl/r/rpl_heartbeat_basic.result
@@ -33,8 +33,6 @@ RESET SLAVE;
*** Warning if updated slave_net_timeout < slave_heartbeat_timeout ***
SET @@global.slave_net_timeout=FLOOR(SLAVE_HEARTBEAT_TIMEOUT)-1;
-Warnings:
-Warning 1704 The requested value for the heartbeat period exceeds the value of `slave_net_timeout' seconds. A sensible value for the period should be less than the timeout.
SET @@global.slave_net_timeout=@restore_slave_net_timeout;
RESET SLAVE;
diff --git a/mysql-test/suite/rpl/r/rpl_insert_delayed,stmt.rdiff b/mysql-test/suite/rpl/r/rpl_insert_delayed,stmt.rdiff
index ddb10d604c6..5db005bc260 100644
--- a/mysql-test/suite/rpl/r/rpl_insert_delayed,stmt.rdiff
+++ b/mysql-test/suite/rpl/r/rpl_insert_delayed,stmt.rdiff
@@ -36,7 +36,7 @@
a
1
On slave
-+show binlog events in 'slave-bin.000002' from <binlog_start> limit 1,6;
++show binlog events in 'slave-bin.000002' from <binlog_start> limit 2,6;
+Log_name Pos Event_type Server_id End_log_pos Info
+slave-bin.000002 # Query # # BEGIN
+slave-bin.000002 # Query # # use `test`; INSERT IGNORE INTO t1 VALUES(1)
diff --git a/mysql-test/suite/rpl/r/rpl_loaddatalocal.result b/mysql-test/suite/rpl/r/rpl_loaddatalocal.result
index ae91e000c37..16fd3f19c4a 100644
--- a/mysql-test/suite/rpl/r/rpl_loaddatalocal.result
+++ b/mysql-test/suite/rpl/r/rpl_loaddatalocal.result
@@ -72,7 +72,7 @@ LOAD DATA /*!10000 LOCAL INFILE 'MYSQLD_DATADIR/bug43746.sql' INTO TABLE */ t1;
LOAD DATA/*!10000 LOCAL */INFILE 'MYSQLD_DATADIR/bug43746.sql'/*!10000 INTO*/TABLE t1;
LOAD DATA/*!10000 LOCAL */INFILE 'MYSQLD_DATADIR/bug43746.sql'/* empty */INTO TABLE t1;
LOAD DATA/*!10000 LOCAL */INFILE 'MYSQLD_DATADIR/bug43746.sql' INTO/* empty */TABLE t1;
-LOAD/*!99999 special comments that do not expand */DATA/*!99999 code from the future */LOCAL INFILE 'MYSQLD_DATADIR/bug43746.sql'/*!99999 have flux capacitor */INTO/*!99999 will travel */TABLE t1;
+LOAD/*!999999 special comments that do not expand */DATA/*!999999 code from the future */LOCAL INFILE 'MYSQLD_DATADIR/bug43746.sql'/*!999999 have flux capacitor */INTO/*!999999 will travel */TABLE t1;
SET sql_mode='PIPES_AS_CONCAT,ANSI_QUOTES,NO_KEY_OPTIONS,NO_TABLE_OPTIONS,NO_FIELD_OPTIONS,STRICT_TRANS_TABLES,STRICT_ALL_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,TRADITIONAL,NO_AUTO_CREATE_USER';
LOAD DATA LOCAL INFILE 'MYSQLD_DATADIR/bug43746.sql' INTO TABLE t1;
[slave]
diff --git a/mysql-test/suite/rpl/r/rpl_mariadb_slave_capability.result b/mysql-test/suite/rpl/r/rpl_mariadb_slave_capability.result
new file mode 100644
index 00000000000..8a068ad8d72
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_mariadb_slave_capability.result
@@ -0,0 +1,98 @@
+include/master-slave.inc
+[connection master]
+set @old_master_binlog_checksum= @@global.binlog_checksum;
+set @old_slave_dbug= @@global.debug_dbug;
+CREATE TABLE t1 (a INT PRIMARY KEY);
+INSERT INTO t1 VALUES (0);
+# Test that a slave with no capability gets a dummy event, which is ignored.
+include/stop_slave.inc
+SET @@global.debug_dbug='+d,simulate_slave_capability_none';
+include/start_slave.inc
+ALTER TABLE t1 ORDER BY a;
+SET SESSION binlog_annotate_row_events = ON;
+DELETE FROM t1;
+INSERT INTO t1 /* A comment just to make the annotate event sufficiently long that the dummy event will need to get padded with spaces so that we can test that this works */ VALUES(1);
+show binlog events in 'master-bin.000001' from <binlog_start> limit 0, 10;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Annotate_rows # # DELETE FROM t1
+master-bin.000001 # Table_map # # table_id: # (test.t1)
+master-bin.000001 # Delete_rows # # table_id: # flags: STMT_END_F
+master-bin.000001 # Query # # COMMIT
+master-bin.000001 # Query # # BEGIN
+master-bin.000001 # Annotate_rows # # INSERT INTO t1 /* A comment just to make the annotate event sufficiently long that the dummy event will need to get padded with spaces so that we can test that this works */ VALUES(1)
+master-bin.000001 # Table_map # # table_id: # (test.t1)
+master-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.000001 # Query # # COMMIT
+SELECT * FROM t1;
+a
+1
+show relaylog events in 'slave-relay-bin.000003' from <binlog_start> limit 0,10;
+Log_name Pos Event_type Server_id End_log_pos Info
+slave-relay-bin.000003 # Query # # BEGIN
+slave-relay-bin.000003 # User var # # @`!dummyvar`=NULL
+slave-relay-bin.000003 # Table_map # # table_id: # (test.t1)
+slave-relay-bin.000003 # Delete_rows # # table_id: # flags: STMT_END_F
+slave-relay-bin.000003 # Query # # COMMIT
+slave-relay-bin.000003 # Query # # BEGIN
+slave-relay-bin.000003 # Query # # # Dummy event replacing event type 160 that slave cannot handle.
+slave-relay-bin.000003 # Table_map # # table_id: # (test.t1)
+slave-relay-bin.000003 # Write_rows # # table_id: # flags: STMT_END_F
+slave-relay-bin.000003 # Query # # COMMIT
+set @@global.debug_dbug= @old_slave_dbug;
+# Test dummy event is checksummed correctly.
+set @@global.binlog_checksum = CRC32;
+TRUNCATE t1;
+INSERT INTO t1 VALUES(2);
+show binlog events in 'master-bin.000002' from <binlog_start> limit 0, 5;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000002 # Query # # BEGIN
+master-bin.000002 # Annotate_rows # # INSERT INTO t1 VALUES(2)
+master-bin.000002 # Table_map # # table_id: # (test.t1)
+master-bin.000002 # Write_rows # # table_id: # flags: STMT_END_F
+master-bin.000002 # Query # # COMMIT
+SELECT * FROM t1;
+a
+2
+show relaylog events in 'slave-relay-bin.000005' from <binlog_start> limit 5,5;
+Log_name Pos Event_type Server_id End_log_pos Info
+slave-relay-bin.000005 # Query # # BEGIN
+slave-relay-bin.000005 # Query # # # Dummy ev
+slave-relay-bin.000005 # Table_map # # table_id: # (test.t1)
+slave-relay-bin.000005 # Write_rows # # table_id: # flags: STMT_END_F
+slave-relay-bin.000005 # Query # # COMMIT
+# Test that a slave which cannot tolerate holes in the binlog stream,
+# but knows the event, does not get a dummy event.
+include/stop_slave.inc
+SET @@global.debug_dbug='+d,simulate_slave_capability_old_53';
+include/start_slave.inc
+ALTER TABLE t1 ORDER BY a;
+UPDATE t1 SET a = 3;
+show binlog events in 'master-bin.000002' from <binlog_start> limit 0, 5;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000002 # Query # # BEGIN
+master-bin.000002 # Annotate_rows # # UPDATE t1 SET a = 3
+master-bin.000002 # Table_map # # table_id: # (test.t1)
+master-bin.000002 # Update_rows # # table_id: # flags: STMT_END_F
+master-bin.000002 # Query # # COMMIT
+SELECT * FROM t1;
+a
+3
+show relaylog events in 'slave-relay-bin.000006' from <binlog_start> limit 0,5;
+Log_name Pos Event_type Server_id End_log_pos Info
+slave-relay-bin.000006 # Query # # BEGIN
+slave-relay-bin.000006 # Annotate_rows # # UPDATE t1 SET a = 3
+slave-relay-bin.000006 # Table_map # # table_id: # (test.t1)
+slave-relay-bin.000006 # Update_rows # # table_id: # flags: STMT_END_F
+slave-relay-bin.000006 # Query # # COMMIT
+select @@global.log_slave_updates;
+@@global.log_slave_updates
+1
+select @@global.replicate_annotate_row_events;
+@@global.replicate_annotate_row_events
+0
+set @@global.debug_dbug= @old_slave_dbug;
+Clean up.
+set @@global.binlog_checksum = @old_master_binlog_checksum;
+DROP TABLE t1;
+include/rpl_end.inc
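
Note: two replacement strategies are visible in the relay logs above, chosen by the size of the Annotate_rows event being hidden from the incapable slave. An event long enough to carry a Query event is rewritten as a dummy Query whose comment is padded with spaces, or truncated (as in '# Dummy ev'), to preserve the exact event length; one that is too short becomes a dummy User_var assignment to @`!dummyvar`. Keeping the byte length identical is what keeps positions in the binlog stream valid. Either form can be inspected with:

    SHOW RELAYLOG EVENTS IN 'slave-relay-bin.000003' LIMIT 0, 10;
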
diff --git a/mysql-test/suite/rpl/r/rpl_rewrt_db.result b/mysql-test/suite/rpl/r/rpl_rewrt_db.result
index 6cbd107fdcf..12071faecfd 100644
--- a/mysql-test/suite/rpl/r/rpl_rewrt_db.result
+++ b/mysql-test/suite/rpl/r/rpl_rewrt_db.result
@@ -25,8 +25,8 @@ Warnings:
Warning 1265 Data truncated for column 'a' at row 1
Warning 1265 Data truncated for column 'c' at row 1
Warning 1265 Data truncated for column 'd' at row 1
-Warning 1264 Out of range value for column 'a' at row 2
-Warning 1264 Out of range value for column 'b' at row 2
+Warning 1265 Data truncated for column 'a' at row 2
+Warning 1265 Data truncated for column 'b' at row 2
Warning 1265 Data truncated for column 'd' at row 2
load data infile '../../std_data/loaddata1.dat' into table t1 fields terminated by ',' IGNORE 2 LINES;
select * from rewrite.t1;
@@ -40,7 +40,7 @@ load data infile '../../std_data/loaddata1.dat' into table t1 fields terminated
Warnings:
Warning 1265 Data truncated for column 'c' at row 1
Warning 1265 Data truncated for column 'd' at row 1
-Warning 1264 Out of range value for column 'b' at row 2
+Warning 1265 Data truncated for column 'b' at row 2
Warning 1265 Data truncated for column 'd' at row 2
select * from rewrite.t1;
a b c d
diff --git a/mysql-test/suite/rpl/r/rpl_rotate_logs.result b/mysql-test/suite/rpl/r/rpl_rotate_logs.result
index f10e30c698d..783c02b961c 100644
--- a/mysql-test/suite/rpl/r/rpl_rotate_logs.result
+++ b/mysql-test/suite/rpl/r/rpl_rotate_logs.result
@@ -2,9 +2,9 @@ CALL mtr.add_suppression("Unsafe statement written to the binary log using state
start slave;
Got one of the listed errors
start slave;
-ERROR HY000: Could not initialize master info structure; more error messages can be found in the MariaDB error log
+ERROR HY000: Could not initialize master info structure for ''; more error messages can be found in the MariaDB error log
change master to master_host='127.0.0.1',master_port=MASTER_PORT, master_user='root';
-ERROR HY000: Could not initialize master info structure; more error messages can be found in the MariaDB error log
+ERROR HY000: Could not initialize master info structure for ''; more error messages can be found in the MariaDB error log
reset slave;
change master to master_host='127.0.0.1',master_port=MASTER_PORT, master_user='root';
reset master;
diff --git a/mysql-test/suite/rpl/r/rpl_row_annotate_do.result b/mysql-test/suite/rpl/r/rpl_row_annotate_do.result
index a7dc2a569a1..cb1aab28603 100644
--- a/mysql-test/suite/rpl/r/rpl_row_annotate_do.result
+++ b/mysql-test/suite/rpl/r/rpl_row_annotate_do.result
@@ -55,6 +55,7 @@ a b
FLUSH LOGS;
show binlog events in 'slave-bin.000001' from <start_pos>;
Log_name Pos Event_type Server_id End_log_pos Info
+slave-bin.000001 # Binlog_checkpoint 2 # slave-bin.000001
slave-bin.000001 # Query 1 # DROP DATABASE IF EXISTS test1
slave-bin.000001 # Query 1 # CREATE DATABASE test1
slave-bin.000001 # Query 1 # use `test1`; CREATE TABLE t1(a int primary key, b int)
diff --git a/mysql-test/suite/rpl/r/rpl_row_annotate_dont.result b/mysql-test/suite/rpl/r/rpl_row_annotate_dont.result
index 2a3b5b1870e..b7648fd8592 100644
--- a/mysql-test/suite/rpl/r/rpl_row_annotate_dont.result
+++ b/mysql-test/suite/rpl/r/rpl_row_annotate_dont.result
@@ -47,6 +47,7 @@ a b
FLUSH LOGS;
show binlog events in 'slave-bin.000001' from <start_pos>;
Log_name Pos Event_type Server_id End_log_pos Info
+slave-bin.000001 # Binlog_checkpoint 2 # slave-bin.000001
slave-bin.000001 # Query 1 # DROP DATABASE IF EXISTS test1
slave-bin.000001 # Query 1 # CREATE DATABASE test1
slave-bin.000001 # Query 1 # use `test1`; CREATE TABLE t1(a int primary key, b int)
diff --git a/mysql-test/suite/rpl/r/rpl_row_log.result b/mysql-test/suite/rpl/r/rpl_row_log.result
index b9be2cd0144..13938762991 100644
--- a/mysql-test/suite/rpl/r/rpl_row_log.result
+++ b/mysql-test/suite/rpl/r/rpl_row_log.result
@@ -205,6 +205,7 @@ master-bin.000001 # Query # # COMMIT
master-bin.000001 # Rotate # # master-bin.000002;pos=4
show binlog events in 'master-bin.000002' from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000002 # Binlog_checkpoint # # master-bin.000002
master-bin.000002 # Query # # use `test`; create table t3 (a int)ENGINE=MyISAM
master-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=MyISAM
master-bin.000002 # Query # # BEGIN
@@ -236,6 +237,7 @@ slave-bin.000001 # Query # # use `test`; create table t3 (a int)ENGINE=MyISAM
slave-bin.000001 # Rotate # # slave-bin.000002;pos=4
show binlog events in 'slave-bin.000002' from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+slave-bin.000002 # Binlog_checkpoint # # slave-bin.000002
slave-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=MyISAM
slave-bin.000002 # Query # # BEGIN
slave-bin.000002 # Table_map # # table_id: # (test.t2)
diff --git a/mysql-test/suite/rpl/r/rpl_row_log_innodb.result b/mysql-test/suite/rpl/r/rpl_row_log_innodb.result
index 15aa8f23b55..c9489a3dc66 100644
--- a/mysql-test/suite/rpl/r/rpl_row_log_innodb.result
+++ b/mysql-test/suite/rpl/r/rpl_row_log_innodb.result
@@ -205,6 +205,7 @@ master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Rotate # # master-bin.000002;pos=4
show binlog events in 'master-bin.000002' from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000002 # Binlog_checkpoint # # master-bin.000002
master-bin.000002 # Query # # use `test`; create table t3 (a int)ENGINE=InnoDB
master-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=InnoDB
master-bin.000002 # Query # # BEGIN
@@ -236,6 +237,7 @@ slave-bin.000001 # Query # # use `test`; create table t3 (a int)ENGINE=InnoDB
slave-bin.000001 # Rotate # # slave-bin.000002;pos=4
show binlog events in 'slave-bin.000002' from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+slave-bin.000002 # Binlog_checkpoint # # slave-bin.000002
slave-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=InnoDB
slave-bin.000002 # Query # # BEGIN
slave-bin.000002 # Table_map # # table_id: # (test.t2)
diff --git a/mysql-test/suite/rpl/r/rpl_row_max_relay_size.result b/mysql-test/suite/rpl/r/rpl_row_max_relay_size.result
index 379cea4d3fc..88d68bb50ee 100644
--- a/mysql-test/suite/rpl/r/rpl_row_max_relay_size.result
+++ b/mysql-test/suite/rpl/r/rpl_row_max_relay_size.result
@@ -10,7 +10,7 @@ reset slave;
#
# Test 1
#
-set @my_max_binlog_size= @@global.max_binlog_size;
+set @my_max_binlog_size= @@global.max_binlog_size, @my_max_relay_log_size=@@global.max_relay_log_size;
set global max_binlog_size=8192;
set global max_relay_log_size=8192-1;
Warnings:
@@ -36,8 +36,10 @@ include/check_slave_is_running.inc
stop slave;
reset slave;
set global max_relay_log_size=0;
+Warnings:
+Warning 1292 Truncated incorrect max_relay_log_size value: '0'
select @@global.max_relay_log_size;
-@@global.max_relay_log_size 0
+@@global.max_relay_log_size 4096
start slave;
include/check_slave_is_running.inc
#
@@ -65,6 +67,7 @@ show master status;
File Position Binlog_Do_DB Binlog_Ignore_DB
master-bin.000002 # <Binlog_Do_DB> <Binlog_Ignore_DB>
set global max_binlog_size= @my_max_binlog_size;
+set global max_relay_log_size= @my_max_relay_log_size;
#
# End of 4.1 tests
#
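
Note: the extra warning and the changed SELECT result reflect max_relay_log_size now being clamped to a 4096-byte minimum; a value of 0 is treated as out of range and adjusted upward, with warning 1292 reporting the truncation. The save-and-restore pattern the test adopts is the usual one:

    SET @my_max_relay_log_size = @@GLOBAL.max_relay_log_size;
    SET GLOBAL max_relay_log_size = 0;    -- clamped to 4096, warning 1292
    SELECT @@GLOBAL.max_relay_log_size;   -- 4096
    SET GLOBAL max_relay_log_size = @my_max_relay_log_size;
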
diff --git a/mysql-test/suite/rpl/r/rpl_row_show_relaylog_events.result b/mysql-test/suite/rpl/r/rpl_row_show_relaylog_events.result
index 7c55e44ed59..cd72b489e1a 100644
--- a/mysql-test/suite/rpl/r/rpl_row_show_relaylog_events.result
+++ b/mysql-test/suite/rpl/r/rpl_row_show_relaylog_events.result
@@ -90,51 +90,54 @@ slave-bin.000001 # Table_map # # table_id: # (test.t1)
slave-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
slave-bin.000001 # Query # # COMMIT
******** [slave] SHOW RELAYLOG EVENTS IN <FILE> ********
-show relaylog events in 'slave-relay-bin.000003' from <binlog_start>;
-Log_name Pos Event_type Server_id End_log_pos Info
-slave-relay-bin.000003 # Rotate # # master-bin.000001;pos=4
-slave-relay-bin.000003 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
-slave-relay-bin.000003 # Query # # use `test`; CREATE TABLE t1 (a INT)
-slave-relay-bin.000003 # Query # # BEGIN
-slave-relay-bin.000003 # Table_map # # table_id: # (test.t1)
-slave-relay-bin.000003 # Write_rows # # table_id: # flags: STMT_END_F
-slave-relay-bin.000003 # Query # # COMMIT
-slave-relay-bin.000003 # Query # # BEGIN
-slave-relay-bin.000003 # Table_map # # table_id: # (test.t1)
-slave-relay-bin.000003 # Write_rows # # table_id: # flags: STMT_END_F
-slave-relay-bin.000003 # Query # # COMMIT
-slave-relay-bin.000003 # Query # # BEGIN
-slave-relay-bin.000003 # Table_map # # table_id: # (test.t1)
-slave-relay-bin.000003 # Write_rows # # table_id: # flags: STMT_END_F
-slave-relay-bin.000003 # Query # # COMMIT
+show relaylog events in 'slave-relay-bin.000002' from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+slave-relay-bin.000002 # Rotate # # master-bin.000001;pos=4
+slave-relay-bin.000002 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+slave-relay-bin.000002 # Binlog_checkpoint # # master-bin.000001
+slave-relay-bin.000002 # Query # # use `test`; CREATE TABLE t1 (a INT)
+slave-relay-bin.000002 # Query # # BEGIN
+slave-relay-bin.000002 # Table_map # # table_id: # (test.t1)
+slave-relay-bin.000002 # Write_rows # # table_id: # flags: STMT_END_F
+slave-relay-bin.000002 # Query # # COMMIT
+slave-relay-bin.000002 # Query # # BEGIN
+slave-relay-bin.000002 # Table_map # # table_id: # (test.t1)
+slave-relay-bin.000002 # Write_rows # # table_id: # flags: STMT_END_F
+slave-relay-bin.000002 # Query # # COMMIT
+slave-relay-bin.000002 # Query # # BEGIN
+slave-relay-bin.000002 # Table_map # # table_id: # (test.t1)
+slave-relay-bin.000002 # Write_rows # # table_id: # flags: STMT_END_F
+slave-relay-bin.000002 # Query # # COMMIT
******** [slave] SHOW RELAYLOG EVENTS IN <FILE> LIMIT 1 ********
-show relaylog events in 'slave-relay-bin.000003' from <binlog_start> limit 1;
+show relaylog events in 'slave-relay-bin.000002' from <binlog_start> limit 1;
Log_name Pos Event_type Server_id End_log_pos Info
-slave-relay-bin.000003 # Rotate # # master-bin.000001;pos=4
+slave-relay-bin.000002 # Rotate # # master-bin.000001;pos=4
******** [slave] SHOW RELAYLOG EVENTS IN <FILE> LIMIT 1,3 ********
-show relaylog events in 'slave-relay-bin.000003' from <binlog_start> limit 1,3;
+show relaylog events in 'slave-relay-bin.000002' from <binlog_start> limit 1,3;
Log_name Pos Event_type Server_id End_log_pos Info
-slave-relay-bin.000003 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
-slave-relay-bin.000003 # Query # # use `test`; CREATE TABLE t1 (a INT)
-slave-relay-bin.000003 # Query # # BEGIN
+slave-relay-bin.000002 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+slave-relay-bin.000002 # Binlog_checkpoint # # master-bin.000001
+slave-relay-bin.000002 # Query # # use `test`; CREATE TABLE t1 (a INT)
******** [slave] SHOW RELAYLOG EVENTS ********
show relaylog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
-slave-relay-bin.000002 # Rotate # # slave-relay-bin.000003;pos=4
+slave-relay-bin.000001 # Rotate # # slave-relay-bin.000002;pos=4
FLUSH LOGS;
FLUSH LOGS;
DROP TABLE t1;
******** [master] SHOW BINLOG EVENTS IN <FILE> ********
show binlog events in 'master-bin.000002' from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000002 # Binlog_checkpoint # # master-bin.000002
master-bin.000002 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
******** [master] SHOW BINLOG EVENTS IN <FILE> LIMIT 1 ********
show binlog events in 'master-bin.000002' from <binlog_start> limit 1;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000002 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
+master-bin.000002 # Binlog_checkpoint # # master-bin.000002
******** [master] SHOW BINLOG EVENTS IN <FILE> LIMIT 1,3 ********
show binlog events in 'master-bin.000002' from <binlog_start> limit 1,3;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000002 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
******** [master] SHOW BINLOG EVENTS ********
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
@@ -155,14 +158,16 @@ master-bin.000001 # Rotate # # master-bin.000002;pos=4
******** [slave] SHOW BINLOG EVENTS IN <FILE> ********
show binlog events in 'slave-bin.000002' from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+slave-bin.000002 # Binlog_checkpoint # # slave-bin.000002
slave-bin.000002 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
******** [slave] SHOW BINLOG EVENTS IN <FILE> LIMIT 1 ********
show binlog events in 'slave-bin.000002' from <binlog_start> limit 1;
Log_name Pos Event_type Server_id End_log_pos Info
-slave-bin.000002 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
+slave-bin.000002 # Binlog_checkpoint # # slave-bin.000002
******** [slave] SHOW BINLOG EVENTS IN <FILE> LIMIT 1,3 ********
show binlog events in 'slave-bin.000002' from <binlog_start> limit 1,3;
Log_name Pos Event_type Server_id End_log_pos Info
+slave-bin.000002 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
******** [slave] SHOW BINLOG EVENTS ********
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
@@ -181,23 +186,26 @@ slave-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F
slave-bin.000001 # Query # # COMMIT
slave-bin.000001 # Rotate # # slave-bin.000002;pos=4
******** [slave] SHOW RELAYLOG EVENTS IN <FILE> ********
-show relaylog events in 'slave-relay-bin.000006' from <binlog_start>;
+show relaylog events in 'slave-relay-bin.000005' from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
-slave-relay-bin.000006 # Rotate # # master-bin.000002;pos=4
-slave-relay-bin.000006 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
-slave-relay-bin.000006 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
+slave-relay-bin.000005 # Rotate # # master-bin.000002;pos=4
+slave-relay-bin.000005 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+slave-relay-bin.000005 # Binlog_checkpoint # # master-bin.000001
+slave-relay-bin.000005 # Binlog_checkpoint # # master-bin.000002
+slave-relay-bin.000005 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
******** [slave] SHOW RELAYLOG EVENTS IN <FILE> LIMIT 1 ********
-show relaylog events in 'slave-relay-bin.000006' from <binlog_start> limit 1;
+show relaylog events in 'slave-relay-bin.000005' from <binlog_start> limit 1;
Log_name Pos Event_type Server_id End_log_pos Info
-slave-relay-bin.000006 # Rotate # # master-bin.000002;pos=4
+slave-relay-bin.000005 # Rotate # # master-bin.000002;pos=4
******** [slave] SHOW RELAYLOG EVENTS IN <FILE> LIMIT 1,3 ********
-show relaylog events in 'slave-relay-bin.000006' from <binlog_start> limit 1,3;
+show relaylog events in 'slave-relay-bin.000005' from <binlog_start> limit 1,3;
Log_name Pos Event_type Server_id End_log_pos Info
-slave-relay-bin.000006 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
-slave-relay-bin.000006 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
+slave-relay-bin.000005 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+slave-relay-bin.000005 # Binlog_checkpoint # # master-bin.000001
+slave-relay-bin.000005 # Binlog_checkpoint # # master-bin.000002
******** [slave] SHOW RELAYLOG EVENTS ********
show relaylog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
-slave-relay-bin.000005 # Rotate # # master-bin.000002;pos=4
-slave-relay-bin.000005 # Rotate # # slave-relay-bin.000006;pos=4
+slave-relay-bin.000004 # Rotate # # master-bin.000002;pos=4
+slave-relay-bin.000004 # Rotate # # slave-relay-bin.000005;pos=4
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_skip_replication.result b/mysql-test/suite/rpl/r/rpl_skip_replication.result
index c19f9009021..92b5fa5f629 100644
--- a/mysql-test/suite/rpl/r/rpl_skip_replication.result
+++ b/mysql-test/suite/rpl/r/rpl_skip_replication.result
@@ -10,7 +10,7 @@ SELECT @@global.replicate_events_marked_for_skip;
@@global.replicate_events_marked_for_skip
replicate
SET GLOBAL replicate_events_marked_for_skip=FILTER_ON_SLAVE;
-ERROR HY000: This operation cannot be performed with a running slave; run STOP SLAVE first
+ERROR HY000: This operation cannot be performed as you have a running slave ''; run STOP SLAVE '' first
SELECT @@global.replicate_events_marked_for_skip;
@@global.replicate_events_marked_for_skip
replicate
diff --git a/mysql-test/suite/rpl/r/rpl_start_slave_deadlock_sys_vars.result b/mysql-test/suite/rpl/r/rpl_start_slave_deadlock_sys_vars.result
deleted file mode 100644
index f69a323f980..00000000000
--- a/mysql-test/suite/rpl/r/rpl_start_slave_deadlock_sys_vars.result
+++ /dev/null
@@ -1,31 +0,0 @@
-include/master-slave.inc
-[connection master]
-# connection: slave
-SET @save_slave_net_timeout = @@GLOBAL.slave_net_timeout;
-STOP SLAVE;
-include/wait_for_slave_to_stop.inc
-# open an extra connection to the slave
-# connection: slave2
-# set debug synchronization point
-SET DEBUG_SYNC='fix_slave_net_timeout SIGNAL parked WAIT_FOR go';
-# attempt to set slave_net_timeout, will wait on sync point
-SET @@GLOBAL.slave_net_timeout = 100;
-# connection: slave
-SET DEBUG_SYNC='now WAIT_FOR parked';
-# connection: slave1
-# attempt to start the SQL thread
-START SLAVE SQL_THREAD;
-# connection: slave
-# wait until SQL thread has been started
-# sleep a bit so that the SQL thread THD handle is initialized
-# signal the set slave_net_timeout to continue
-SET DEBUG_SYNC='now SIGNAL go';
-# connection: slave2
-# reap result of set slave_net_timeout
-# connection: slave1
-# reap result of starting the SQL thread
-# disconnect: slave2
-# connection: slave
-# cleanup
-SET @@GLOBAL.slave_net_timeout = @save_slave_net_timeout;
-include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_stm_log.result b/mysql-test/suite/rpl/r/rpl_stm_log.result
index 3bb3f347a43..ea4fc259b14 100644
--- a/mysql-test/suite/rpl/r/rpl_stm_log.result
+++ b/mysql-test/suite/rpl/r/rpl_stm_log.result
@@ -205,6 +205,7 @@ master-bin.000001 # Query # # COMMIT
master-bin.000001 # Rotate # # master-bin.000002;pos=4
show binlog events in 'master-bin.000002' from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000002 # Binlog_checkpoint # # master-bin.000002
master-bin.000002 # Query # # use `test`; create table t3 (a int)ENGINE=MyISAM
master-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=MyISAM
master-bin.000002 # Query # # BEGIN
@@ -235,6 +236,7 @@ slave-bin.000001 # Query # # use `test`; create table t3 (a int)ENGINE=MyISAM
slave-bin.000001 # Rotate # # slave-bin.000002;pos=4
show binlog events in 'slave-bin.000002' from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+slave-bin.000002 # Binlog_checkpoint # # slave-bin.000002
slave-bin.000002 # Query # # use `test`; create table t2 (n int)ENGINE=MyISAM
slave-bin.000002 # Query # # BEGIN
slave-bin.000002 # Query # # use `test`; insert into t2 values (1)
diff --git a/mysql-test/suite/rpl/r/rpl_stm_max_relay_size.result b/mysql-test/suite/rpl/r/rpl_stm_max_relay_size.result
index 379cea4d3fc..88d68bb50ee 100644
--- a/mysql-test/suite/rpl/r/rpl_stm_max_relay_size.result
+++ b/mysql-test/suite/rpl/r/rpl_stm_max_relay_size.result
@@ -10,7 +10,7 @@ reset slave;
#
# Test 1
#
-set @my_max_binlog_size= @@global.max_binlog_size;
+set @my_max_binlog_size= @@global.max_binlog_size, @my_max_relay_log_size=@@global.max_relay_log_size;
set global max_binlog_size=8192;
set global max_relay_log_size=8192-1;
Warnings:
@@ -36,8 +36,10 @@ include/check_slave_is_running.inc
stop slave;
reset slave;
set global max_relay_log_size=0;
+Warnings:
+Warning 1292 Truncated incorrect max_relay_log_size value: '0'
select @@global.max_relay_log_size;
-@@global.max_relay_log_size 0
+@@global.max_relay_log_size 4096
start slave;
include/check_slave_is_running.inc
#
@@ -65,6 +67,7 @@ show master status;
File Position Binlog_Do_DB Binlog_Ignore_DB
master-bin.000002 # <Binlog_Do_DB> <Binlog_Ignore_DB>
set global max_binlog_size= @my_max_binlog_size;
+set global max_relay_log_size= @my_max_relay_log_size;
#
# End of 4.1 tests
#
diff --git a/mysql-test/suite/rpl/r/rpl_stm_mix_show_relaylog_events.result b/mysql-test/suite/rpl/r/rpl_stm_mix_show_relaylog_events.result
index f706fa0fe75..490c4998b5d 100644
--- a/mysql-test/suite/rpl/r/rpl_stm_mix_show_relaylog_events.result
+++ b/mysql-test/suite/rpl/r/rpl_stm_mix_show_relaylog_events.result
@@ -78,48 +78,51 @@ slave-bin.000001 # Query # # BEGIN
slave-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (3)
slave-bin.000001 # Query # # COMMIT
******** [slave] SHOW RELAYLOG EVENTS IN <FILE> ********
-show relaylog events in 'slave-relay-bin.000003' from <binlog_start>;
-Log_name Pos Event_type Server_id End_log_pos Info
-slave-relay-bin.000003 # Rotate # # master-bin.000001;pos=4
-slave-relay-bin.000003 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
-slave-relay-bin.000003 # Query # # use `test`; CREATE TABLE t1 (a INT)
-slave-relay-bin.000003 # Query # # BEGIN
-slave-relay-bin.000003 # Query # # use `test`; INSERT INTO t1 VALUES (1)
-slave-relay-bin.000003 # Query # # COMMIT
-slave-relay-bin.000003 # Query # # BEGIN
-slave-relay-bin.000003 # Query # # use `test`; INSERT INTO t1 VALUES (2)
-slave-relay-bin.000003 # Query # # COMMIT
-slave-relay-bin.000003 # Query # # BEGIN
-slave-relay-bin.000003 # Query # # use `test`; INSERT INTO t1 VALUES (3)
-slave-relay-bin.000003 # Query # # COMMIT
+show relaylog events in 'slave-relay-bin.000002' from <binlog_start>;
+Log_name Pos Event_type Server_id End_log_pos Info
+slave-relay-bin.000002 # Rotate # # master-bin.000001;pos=4
+slave-relay-bin.000002 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+slave-relay-bin.000002 # Binlog_checkpoint # # master-bin.000001
+slave-relay-bin.000002 # Query # # use `test`; CREATE TABLE t1 (a INT)
+slave-relay-bin.000002 # Query # # BEGIN
+slave-relay-bin.000002 # Query # # use `test`; INSERT INTO t1 VALUES (1)
+slave-relay-bin.000002 # Query # # COMMIT
+slave-relay-bin.000002 # Query # # BEGIN
+slave-relay-bin.000002 # Query # # use `test`; INSERT INTO t1 VALUES (2)
+slave-relay-bin.000002 # Query # # COMMIT
+slave-relay-bin.000002 # Query # # BEGIN
+slave-relay-bin.000002 # Query # # use `test`; INSERT INTO t1 VALUES (3)
+slave-relay-bin.000002 # Query # # COMMIT
******** [slave] SHOW RELAYLOG EVENTS IN <FILE> LIMIT 1 ********
-show relaylog events in 'slave-relay-bin.000003' from <binlog_start> limit 1;
+show relaylog events in 'slave-relay-bin.000002' from <binlog_start> limit 1;
Log_name Pos Event_type Server_id End_log_pos Info
-slave-relay-bin.000003 # Rotate # # master-bin.000001;pos=4
+slave-relay-bin.000002 # Rotate # # master-bin.000001;pos=4
******** [slave] SHOW RELAYLOG EVENTS IN <FILE> LIMIT 1,3 ********
-show relaylog events in 'slave-relay-bin.000003' from <binlog_start> limit 1,3;
+show relaylog events in 'slave-relay-bin.000002' from <binlog_start> limit 1,3;
Log_name Pos Event_type Server_id End_log_pos Info
-slave-relay-bin.000003 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
-slave-relay-bin.000003 # Query # # use `test`; CREATE TABLE t1 (a INT)
-slave-relay-bin.000003 # Query # # BEGIN
+slave-relay-bin.000002 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+slave-relay-bin.000002 # Binlog_checkpoint # # master-bin.000001
+slave-relay-bin.000002 # Query # # use `test`; CREATE TABLE t1 (a INT)
******** [slave] SHOW RELAYLOG EVENTS ********
show relaylog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
-slave-relay-bin.000002 # Rotate # # slave-relay-bin.000003;pos=4
+slave-relay-bin.000001 # Rotate # # slave-relay-bin.000002;pos=4
FLUSH LOGS;
FLUSH LOGS;
DROP TABLE t1;
******** [master] SHOW BINLOG EVENTS IN <FILE> ********
show binlog events in 'master-bin.000002' from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000002 # Binlog_checkpoint # # master-bin.000002
master-bin.000002 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
******** [master] SHOW BINLOG EVENTS IN <FILE> LIMIT 1 ********
show binlog events in 'master-bin.000002' from <binlog_start> limit 1;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000002 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
+master-bin.000002 # Binlog_checkpoint # # master-bin.000002
******** [master] SHOW BINLOG EVENTS IN <FILE> LIMIT 1,3 ********
show binlog events in 'master-bin.000002' from <binlog_start> limit 1,3;
Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000002 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
******** [master] SHOW BINLOG EVENTS ********
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
@@ -137,14 +140,16 @@ master-bin.000001 # Rotate # # master-bin.000002;pos=4
******** [slave] SHOW BINLOG EVENTS IN <FILE> ********
show binlog events in 'slave-bin.000002' from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
+slave-bin.000002 # Binlog_checkpoint # # slave-bin.000002
slave-bin.000002 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
******** [slave] SHOW BINLOG EVENTS IN <FILE> LIMIT 1 ********
show binlog events in 'slave-bin.000002' from <binlog_start> limit 1;
Log_name Pos Event_type Server_id End_log_pos Info
-slave-bin.000002 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
+slave-bin.000002 # Binlog_checkpoint # # slave-bin.000002
******** [slave] SHOW BINLOG EVENTS IN <FILE> LIMIT 1,3 ********
show binlog events in 'slave-bin.000002' from <binlog_start> limit 1,3;
Log_name Pos Event_type Server_id End_log_pos Info
+slave-bin.000002 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
******** [slave] SHOW BINLOG EVENTS ********
show binlog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
@@ -160,23 +165,26 @@ slave-bin.000001 # Query # # use `test`; INSERT INTO t1 VALUES (3)
slave-bin.000001 # Query # # COMMIT
slave-bin.000001 # Rotate # # slave-bin.000002;pos=4
******** [slave] SHOW RELAYLOG EVENTS IN <FILE> ********
-show relaylog events in 'slave-relay-bin.000006' from <binlog_start>;
+show relaylog events in 'slave-relay-bin.000005' from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
-slave-relay-bin.000006 # Rotate # # master-bin.000002;pos=4
-slave-relay-bin.000006 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
-slave-relay-bin.000006 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
+slave-relay-bin.000005 # Rotate # # master-bin.000002;pos=4
+slave-relay-bin.000005 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+slave-relay-bin.000005 # Binlog_checkpoint # # master-bin.000001
+slave-relay-bin.000005 # Binlog_checkpoint # # master-bin.000002
+slave-relay-bin.000005 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
******** [slave] SHOW RELAYLOG EVENTS IN <FILE> LIMIT 1 ********
-show relaylog events in 'slave-relay-bin.000006' from <binlog_start> limit 1;
+show relaylog events in 'slave-relay-bin.000005' from <binlog_start> limit 1;
Log_name Pos Event_type Server_id End_log_pos Info
-slave-relay-bin.000006 # Rotate # # master-bin.000002;pos=4
+slave-relay-bin.000005 # Rotate # # master-bin.000002;pos=4
******** [slave] SHOW RELAYLOG EVENTS IN <FILE> LIMIT 1,3 ********
-show relaylog events in 'slave-relay-bin.000006' from <binlog_start> limit 1,3;
+show relaylog events in 'slave-relay-bin.000005' from <binlog_start> limit 1,3;
Log_name Pos Event_type Server_id End_log_pos Info
-slave-relay-bin.000006 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
-slave-relay-bin.000006 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
+slave-relay-bin.000005 # Format_desc # # SERVER_VERSION, BINLOG_VERSION
+slave-relay-bin.000005 # Binlog_checkpoint # # master-bin.000001
+slave-relay-bin.000005 # Binlog_checkpoint # # master-bin.000002
******** [slave] SHOW RELAYLOG EVENTS ********
show relaylog events from <binlog_start>;
Log_name Pos Event_type Server_id End_log_pos Info
-slave-relay-bin.000005 # Rotate # # master-bin.000002;pos=4
-slave-relay-bin.000005 # Rotate # # slave-relay-bin.000006;pos=4
+slave-relay-bin.000004 # Rotate # # master-bin.000002;pos=4
+slave-relay-bin.000004 # Rotate # # slave-relay-bin.000005;pos=4
include/rpl_end.inc
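
Note: the renumbering running through these result files has a single cause. Each binary log now begins with a Binlog_checkpoint event right after the Format_desc header, and the event is copied through to the relay log, so every 'limit N' window shifts by one slot; the relay logs also come out numbered one lower than before, consistent with one fewer rotation during setup. When paging past the checkpoint, the offset form of LIMIT is the tool, e.g.:

    show binlog events in 'master-bin.000002' from <binlog_start> limit 1,3;
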
diff --git a/mysql-test/suite/rpl/t/rpl_conditional_comments.test b/mysql-test/suite/rpl/t/rpl_conditional_comments.test
index 88adf3a20f1..0e2c108bf6e 100644
--- a/mysql-test/suite/rpl/t/rpl_conditional_comments.test
+++ b/mysql-test/suite/rpl/t/rpl_conditional_comments.test
@@ -4,7 +4,7 @@
# master. So they become common comments and will not be applied on slave.
#
# - Example:
-# 'INSERT INTO t1 VALUES (1) /*!10000, (2)*/ /*!99999 ,(3)*/
+# 'INSERT INTO t1 VALUES (1) /*!10000, (2)*/ /*!999999 ,(3)*/
# will be binlogged as
# 'INSERT INTO t1 VALUES (1) /*!10000, (2)*/ /* 999999 ,(3)*/'.
###############################################################################
@@ -21,7 +21,7 @@ let $binlog_start= query_get_value(SHOW MASTER STATUS, Position, 1);
--echo # In a statement, some CCs are applied while others are not. The CCs
--echo # which are not applied on master will be binlogged as common comments.
-/*!99999 --- */INSERT /*!INTO*/ /*!10000 t1 */ VALUES(10) /*!99999 ,(11)*/;
+/*!999999 --- */INSERT /*!INTO*/ /*!10000 t1 */ VALUES(10) /*!999999 ,(11)*/;
source include/show_binlog_events.inc;
let $binlog_start= query_get_value(SHOW MASTER STATUS, Position, 1);
@@ -35,7 +35,7 @@ sync_slave_with_master;
--echo # Verify whether it can be binlogged correctly when executing prepared
--echo # statement.
--connection master
-PREPARE stmt FROM 'INSERT INTO /*!99999 blabla*/ t1 VALUES(60) /*!99999 ,(61)*/';
+PREPARE stmt FROM 'INSERT INTO /*!999999 blabla*/ t1 VALUES(60) /*!999999 ,(61)*/';
EXECUTE stmt;
DROP TABLE t1;
CREATE TABLE t1(c1 INT);
@@ -48,7 +48,7 @@ sync_slave_with_master;
--connection master
--echo
SET @value=62;
-PREPARE stmt FROM 'INSERT INTO /*!99999 blabla */ t1 VALUES(?) /*!99999 ,(63)*/';
+PREPARE stmt FROM 'INSERT INTO /*!999999 blabla */ t1 VALUES(?) /*!999999 ,(63)*/';
EXECUTE stmt USING @value;
DROP TABLE t1;
CREATE TABLE t1(c1 INT);
@@ -68,7 +68,7 @@ sync_slave_with_master;
--echo # comments
--connection master
--error 1064
-SELECT c1 FROM /*!99999 t1 WHEREN;
+SELECT c1 FROM /*!999999 t1 WHEREN;
DROP TABLE t1;
--source include/rpl_end.inc
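
Note: the switch from /*!99999 to /*!999999 keeps these conditional comments inert. The digits encode a minimum server version (Mmmii, so 50000 means 5.00.00), and MariaDB 10.0's own version id (100000 and up) overtook the five-digit 99999, which would have made the old comments start executing. A minimal sketch of the gating rule:

    SELECT 1 /*!50000 + 1 */;    -- comment body runs on 5.00.00 and up, returning 2
    SELECT 1 /*!999999 + 1 */;   -- ignored until an implausible version 99.99.99
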
diff --git a/mysql-test/suite/rpl/t/rpl_flush_logs.test b/mysql-test/suite/rpl/t/rpl_flush_logs.test
index 6e9de634157..1d19576d47c 100644
--- a/mysql-test/suite/rpl/t/rpl_flush_logs.test
+++ b/mysql-test/suite/rpl/t/rpl_flush_logs.test
@@ -31,8 +31,9 @@ connection master;
flush relay logs;
sync_slave_with_master;
---echo # Check the 'slave-relay-bin.000003' file is created
+--echo # Check the 'slave-relay-bin.000003' file is not created
--echo # after executing the 'flush relay logs' statement.
+--error 1
file_exists $MYSQLTEST_VARDIR/mysqld.2/data/slave-relay-bin.000003;
connection master;
@@ -89,10 +90,10 @@ file_exists $MYSQLTEST_VARDIR/mysqld.1/data/master-bin.000001;
# Test 'flush error logs, relay logs' statement
sync_slave_with_master;
---echo # Make sure the 'slave-relay-bin.000006' file does not exist
+--echo # Make sure the 'slave-relay-bin.000005' file does not exist
--echo # before executing the 'flush error logs, relay logs' statement.
--error 1
-file_exists $MYSQLTEST_VARDIR/mysqld.2/data/slave-relay-bin.000006;
+file_exists $MYSQLTEST_VARDIR/mysqld.2/data/slave-relay-bin.000005;
connection master;
@@ -107,19 +108,18 @@ file_exists $MYSQLTEST_VARDIR/tmp/master_log.err;
file_exists $MYSQLTEST_VARDIR/mysqld.1/data/master-bin.000003;
sync_slave_with_master;
---echo # Check the 'slave-relay-bin.000006' file is created after
+--echo # Check the 'slave-relay-bin.000004' file is created after
--echo # executing the 'flush error logs, relay logs' statement.
-file_exists $MYSQLTEST_VARDIR/mysqld.2/data/slave-relay-bin.000006;
-
+file_exists $MYSQLTEST_VARDIR/mysqld.2/data/slave-relay-bin.000004;
# Test 'flush logs' statement
---echo # Make sure the 'slave-relay-bin.000007' and 'slave-relay-bin.000008'
+--echo # Make sure the 'slave-relay-bin.000005' and 'slave-relay-bin.000006'
--echo # files do not exist before executing
--echo # the 'flush logs' statement.
--error 1
-file_exists $MYSQLTEST_VARDIR/mysqld.2/data/slave-relay-bin.000007;
+file_exists $MYSQLTEST_VARDIR/mysqld.2/data/slave-relay-bin.000005;
--error 1
-file_exists $MYSQLTEST_VARDIR/mysqld.2/data/slave-relay-bin.000008;
+file_exists $MYSQLTEST_VARDIR/mysqld.2/data/slave-relay-bin.000006;
connection master;
@@ -133,9 +133,9 @@ file_exists $MYSQLTEST_VARDIR/tmp/master_log.err;
file_exists $MYSQLTEST_VARDIR/mysqld.1/data/master-bin.000003;
sync_slave_with_master;
---echo # Check the 'slave-relay-bin.000007' and 'slave-relay-bin.000008'
+--echo # Check the 'slave-relay-bin.000005' and 'slave-relay-bin.000006'
--echo # files are created after executing the 'flush logs' statement.
-file_exists $MYSQLTEST_VARDIR/mysqld.2/data/slave-relay-bin.000007;
-file_exists $MYSQLTEST_VARDIR/mysqld.2/data/slave-relay-bin.000008;
+file_exists $MYSQLTEST_VARDIR/mysqld.2/data/slave-relay-bin.000005;
+file_exists $MYSQLTEST_VARDIR/mysqld.2/data/slave-relay-bin.000006;
--source include/rpl_end.inc
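
Note: the absence checks above lean on a mysqltest idiom. file_exists aborts the test when its argument is missing, so expecting that failure with --error 1 turns it into an assertion that the file was not created. A minimal sketch, with a deliberately nonexistent (hypothetical) name:

    --error 1
    file_exists $MYSQLTEST_VARDIR/mysqld.2/data/no-such-relay-log.000099;
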
diff --git a/mysql-test/suite/rpl/t/rpl_function_defaults.test b/mysql-test/suite/rpl/t/rpl_function_defaults.test
new file mode 100644
index 00000000000..24bec10d305
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_function_defaults.test
@@ -0,0 +1,93 @@
+--echo #
+--echo # Test of function defaults on replicated tables.
+--echo #
+
+source include/master-slave.inc;
+
+--echo connection master
+connection master;
+SET TIME_ZONE="+10:30";
+SET TIMESTAMP=123456.789123;
+SELECT CURRENT_TIMESTAMP;
+
+--echo connection slave
+connection slave;
+SET TIME_ZONE="+00:00";
+SET TIMESTAMP=987654321.123456;
+SELECT CURRENT_TIMESTAMP;
+
+--echo connection master
+connection master;
+CREATE TABLE t1 (
+ a TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ b TIMESTAMP(1) NOT NULL DEFAULT CURRENT_TIMESTAMP(1),
+ c TIMESTAMP(2) NOT NULL DEFAULT CURRENT_TIMESTAMP(2),
+ d TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3),
+ e TIMESTAMP(4) NOT NULL DEFAULT CURRENT_TIMESTAMP(4),
+ f TIMESTAMP(5) NOT NULL DEFAULT CURRENT_TIMESTAMP(5),
+ g TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6),
+ h DATETIME DEFAULT CURRENT_TIMESTAMP,
+ i DATETIME(1) DEFAULT CURRENT_TIMESTAMP(1),
+ j DATETIME(2) DEFAULT CURRENT_TIMESTAMP(2),
+ k DATETIME(3) DEFAULT CURRENT_TIMESTAMP(3),
+ l DATETIME(4) DEFAULT CURRENT_TIMESTAMP(4),
+ m DATETIME(5) DEFAULT CURRENT_TIMESTAMP(5),
+ n DATETIME(6) DEFAULT CURRENT_TIMESTAMP(6),
+ o INT
+);
+
+INSERT INTO t1 ( o ) VALUES ( 1 );
+
+CREATE TABLE t2 (
+ a TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP,
+ b TIMESTAMP(1) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(1),
+ c TIMESTAMP(2) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(2),
+ d TIMESTAMP(3) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(3),
+ e TIMESTAMP(4) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(4),
+ f TIMESTAMP(5) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(5),
+ g TIMESTAMP(6) NOT NULL DEFAULT '0000-00-00 00:00:00' ON UPDATE CURRENT_TIMESTAMP(6),
+ h DATETIME ON UPDATE CURRENT_TIMESTAMP,
+ i DATETIME(1) ON UPDATE CURRENT_TIMESTAMP(1),
+ j DATETIME(2) ON UPDATE CURRENT_TIMESTAMP(2),
+ k DATETIME(3) ON UPDATE CURRENT_TIMESTAMP(3),
+ l DATETIME(4) ON UPDATE CURRENT_TIMESTAMP(4),
+ m DATETIME(5) ON UPDATE CURRENT_TIMESTAMP(5),
+ n DATETIME(6) ON UPDATE CURRENT_TIMESTAMP(6),
+ o INT
+);
+
+INSERT INTO t2 ( o ) VALUES ( 1 );
+
+--echo sync_slave_with_master
+sync_slave_with_master;
+
+--echo connection slave
+connection slave;
+
+query_vertical SELECT * FROM t1;
+query_vertical SELECT * FROM t2;
+
+--echo connection master
+connection master;
+
+SET TIMESTAMP=1234567890.123456;
+SELECT CURRENT_TIMESTAMP;
+
+UPDATE t1 SET o = 2;
+UPDATE t2 SET o = 2;
+
+--echo sync_slave_with_master
+sync_slave_with_master;
+
+--echo connection slave
+connection slave;
+
+query_vertical SELECT * FROM t1;
+query_vertical SELECT * FROM t2;
+
+--echo connection master
+connection master;
+
+DROP TABLE t1, t2;
+
+--source include/rpl_end.inc
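
Note: the ordering guarantees in this test come from sync_slave_with_master. It blocks until the slave has applied everything the master has written to its binlog so far, then switches the active connection to the slave, which is why each slave-side SELECT can trust the preceding master-side writes. The bare idiom looks like:

    connection master;
    INSERT INTO t1 ( o ) VALUES ( 1 );
    sync_slave_with_master;
    # now connected to the slave, with the insert applied
    SELECT * FROM t1;
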
diff --git a/mysql-test/suite/rpl/t/rpl_loaddatalocal.test b/mysql-test/suite/rpl/t/rpl_loaddatalocal.test
index 4ebe572741f..30c9ff1e9e2 100644
--- a/mysql-test/suite/rpl/t/rpl_loaddatalocal.test
+++ b/mysql-test/suite/rpl/t/rpl_loaddatalocal.test
@@ -141,7 +141,7 @@ eval LOAD DATA/*!10000 LOCAL */INFILE '$MYSQLD_DATADIR/bug43746.sql'/* empty */I
eval LOAD DATA/*!10000 LOCAL */INFILE '$MYSQLD_DATADIR/bug43746.sql' INTO/* empty */TABLE t1;
--replace_result $MYSQLD_DATADIR MYSQLD_DATADIR
-eval LOAD/*!99999 special comments that do not expand */DATA/*!99999 code from the future */LOCAL INFILE '$MYSQLD_DATADIR/bug43746.sql'/*!99999 have flux capacitor */INTO/*!99999 will travel */TABLE t1;
+eval LOAD/*!999999 special comments that do not expand */DATA/*!999999 code from the future */LOCAL INFILE '$MYSQLD_DATADIR/bug43746.sql'/*!999999 have flux capacitor */INTO/*!999999 will travel */TABLE t1;
SET sql_mode='PIPES_AS_CONCAT,ANSI_QUOTES,NO_KEY_OPTIONS,NO_TABLE_OPTIONS,NO_FIELD_OPTIONS,STRICT_TRANS_TABLES,STRICT_ALL_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,TRADITIONAL,NO_AUTO_CREATE_USER';
diff --git a/mysql-test/suite/rpl/t/rpl_mariadb_slave_capability.test b/mysql-test/suite/rpl/t/rpl_mariadb_slave_capability.test
new file mode 100644
index 00000000000..251136a2fe1
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_mariadb_slave_capability.test
@@ -0,0 +1,104 @@
+--source include/master-slave.inc
+--source include/have_debug.inc
+--source include/have_binlog_format_row.inc
+
+connection master;
+
+set @old_master_binlog_checksum= @@global.binlog_checksum;
+set @old_slave_dbug= @@global.debug_dbug;
+CREATE TABLE t1 (a INT PRIMARY KEY);
+INSERT INTO t1 VALUES (0);
+
+sync_slave_with_master;
+connection slave;
+
+--echo # Test that a slave with no capability gets a dummy event, which is ignored.
+--source include/stop_slave.inc
+SET @@global.debug_dbug='+d,simulate_slave_capability_none';
+--source include/start_slave.inc
+connection master;
+# Add a dummy event just to have something to sync_slave_with_master on.
+# Otherwise we occasionally get different $relaylog_start, depending on
+# whether Format_description_log_event was written to relay log or not
+# at the time of SHOW SLAVE STATUS.
+ALTER TABLE t1 ORDER BY a;
+sync_slave_with_master;
+connection slave;
+let $relaylog_start= query_get_value(SHOW SLAVE STATUS, Relay_Log_Pos, 1);
+
+connection master;
+SET SESSION binlog_annotate_row_events = ON;
+let $binlog_file= query_get_value(SHOW MASTER STATUS, File, 1);
+let $binlog_start= query_get_value(SHOW MASTER STATUS, Position, 1);
+# A short event, to test when we need to use a user_var event for the dummy event.
+DELETE FROM t1;
+INSERT INTO t1 /* A comment just to make the annotate event sufficiently long that the dummy event will need to get padded with spaces so that we can test that this works */ VALUES(1);
+let $binlog_limit= 0, 10;
+--source include/show_binlog_events.inc
+sync_slave_with_master;
+connection slave;
+
+SELECT * FROM t1;
+let $binlog_file= query_get_value(SHOW SLAVE STATUS, Relay_Log_File, 1);
+let $binlog_start= $relaylog_start;
+let $binlog_limit=0,10;
+--source include/show_relaylog_events.inc
+set @@global.debug_dbug= @old_slave_dbug;
+
+--echo # Test dummy event is checksummed correctly.
+
+connection master;
+set @@global.binlog_checksum = CRC32;
+TRUNCATE t1;
+let $binlog_file= query_get_value(SHOW MASTER STATUS, File, 1);
+let $binlog_start= query_get_value(SHOW MASTER STATUS, Position, 1);
+INSERT INTO t1 VALUES(2);
+let $binlog_limit= 0, 5;
+--source include/show_binlog_events.inc
+sync_slave_with_master;
+connection slave;
+
+SELECT * FROM t1;
+let $binlog_file= query_get_value(SHOW SLAVE STATUS, Relay_Log_File, 1);
+let $binlog_start= 0;
+let $binlog_limit=5,5;
+--source include/show_relaylog_events.inc
+
+--echo # Test that a slave which cannot tolerate holes in the binlog stream,
+--echo # but knows the event, does not get a dummy event.
+
+--source include/stop_slave.inc
+SET @@global.debug_dbug='+d,simulate_slave_capability_old_53';
+--source include/start_slave.inc
+connection master;
+ALTER TABLE t1 ORDER BY a;
+sync_slave_with_master;
+connection slave;
+let $relaylog_start= query_get_value(SHOW SLAVE STATUS, Relay_Log_Pos, 1);
+
+connection master;
+let $binlog_file= query_get_value(SHOW MASTER STATUS, File, 1);
+let $binlog_start= query_get_value(SHOW MASTER STATUS, Position, 1);
+UPDATE t1 SET a = 3;
+let $binlog_limit= 0, 5;
+--source include/show_binlog_events.inc
+sync_slave_with_master;
+connection slave;
+
+SELECT * FROM t1;
+let $binlog_file= query_get_value(SHOW SLAVE STATUS, Relay_Log_File, 1);
+let $binlog_start= $relaylog_start;
+let $binlog_limit=0,5;
+--source include/show_relaylog_events.inc
+
+select @@global.log_slave_updates;
+select @@global.replicate_annotate_row_events;
+
+set @@global.debug_dbug= @old_slave_dbug;
+
+--echo Clean up.
+connection master;
+set @@global.binlog_checksum = @old_master_binlog_checksum;
+DROP TABLE t1;
+sync_slave_with_master;
+--source include/rpl_end.inc
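
Note: the simulated capabilities are compiled-in debug injection points, which is why the test pulls in have_debug.inc. The '+d,name' syntax adds one named keyword to the active debug settings, and saving the old value first allows an exact restore. The toggle pattern, assuming a debug build:

    SET @old_dbug = @@GLOBAL.debug_dbug;
    SET @@GLOBAL.debug_dbug = '+d,simulate_slave_capability_none';
    # ... run the replication scenario under test ...
    SET @@GLOBAL.debug_dbug = @old_dbug;
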
diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync.test b/mysql-test/suite/rpl/t/rpl_semi_sync.test
index 664ea732cac..ac5511e28a3 100644
--- a/mysql-test/suite/rpl/t/rpl_semi_sync.test
+++ b/mysql-test/suite/rpl/t/rpl_semi_sync.test
@@ -82,6 +82,7 @@ show variables like 'rpl_semi_sync_master_enabled';
echo [ status of semi-sync on master should be ON even without any semi-sync slaves ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
+--replace_result 305 304
show status like 'Rpl_semi_sync_master_yes_tx';
--echo #
@@ -112,6 +113,7 @@ enable_query_log;
echo [ status of semi-sync on master should be OFF ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
+--replace_result 305 304
show status like 'Rpl_semi_sync_master_yes_tx';
disable_query_log;
@@ -157,6 +159,7 @@ echo [ initial master state after the semi-sync slave connected ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
+--replace_result 305 304
show status like 'Rpl_semi_sync_master_yes_tx';
replace_result $engine_type ENGINE_TYPE;
@@ -165,6 +168,7 @@ eval create table t1(a int) engine = $engine_type;
echo [ master state after CREATE TABLE statement ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
+--replace_result 305 304
show status like 'Rpl_semi_sync_master_yes_tx';
# After fix of BUG#45848, semi-sync slave should not create any extra
@@ -186,6 +190,7 @@ enable_query_log;
echo [ master status after inserts ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
+--replace_result 305 304
show status like 'Rpl_semi_sync_master_yes_tx';
sync_slave_with_master;
@@ -244,6 +249,7 @@ set global rpl_semi_sync_master_timeout= 5000;
echo [ master status should be ON ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
+--replace_result 305 304
show status like 'Rpl_semi_sync_master_yes_tx';
show status like 'Rpl_semi_sync_master_clients';
@@ -260,6 +266,7 @@ source include/wait_for_status_var.inc;
echo [ master status should be OFF ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
+--replace_result 305 304
show status like 'Rpl_semi_sync_master_yes_tx';
# Semi-sync status on master is now OFF, so all these transactions
@@ -278,6 +285,7 @@ insert into t1 values (100);
echo [ master status should be OFF ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
+--replace_result 305 304
show status like 'Rpl_semi_sync_master_yes_tx';
--echo #
@@ -309,6 +317,7 @@ echo [ on master ];
echo [ master status should be ON again after slave catches up ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
+--replace_result 305 304
show status like 'Rpl_semi_sync_master_yes_tx';
show status like 'Rpl_semi_sync_master_clients';
@@ -328,12 +337,14 @@ source include/stop_slave.inc;
connection master;
echo [ Semi-sync master status variables before FLUSH STATUS ];
SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx';
+--replace_result 306 305
SHOW STATUS LIKE 'Rpl_semi_sync_master_yes_tx';
# Do not write the FLUSH STATUS to binlog, to make sure we'll get a
# clean status after this.
FLUSH NO_WRITE_TO_BINLOG STATUS;
echo [ Semi-sync master status variables after FLUSH STATUS ];
SHOW STATUS LIKE 'Rpl_semi_sync_master_no_tx';
+--replace_result 306 305
SHOW STATUS LIKE 'Rpl_semi_sync_master_yes_tx';
connection master;
@@ -382,6 +393,7 @@ reset master;
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
+--replace_result 306 305
show status like 'Rpl_semi_sync_master_yes_tx';
connection slave;
@@ -434,6 +446,7 @@ echo [ on master ];
echo [ master semi-sync status should be ON ];
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
+--replace_result 306 305
show status like 'Rpl_semi_sync_master_yes_tx';
--echo #
@@ -484,6 +497,7 @@ echo [ master semi-sync should be ON ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
+--replace_result 306 305
show status like 'Rpl_semi_sync_master_yes_tx';
insert into t1 values (4);
insert into t1 values (5);
@@ -491,6 +505,7 @@ echo [ master semi-sync should be ON ];
show status like 'Rpl_semi_sync_master_clients';
show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_no_tx';
+--replace_result 306 305
show status like 'Rpl_semi_sync_master_yes_tx';
--echo #
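The --replace_result lines added throughout this test normalize the Rpl_semi_sync_master_yes_tx counter, which can come out one higher or lower depending on event sizes in the binlog. mysqltest's --replace_result rewrites matching strings in the output of the next statement only, before that output is compared against the recorded .result file. A self-contained sketch of the command with a standard mtr variable:

# make the recorded output independent of the actual master port
--replace_result $MASTER_MYPORT MASTER_PORT
SHOW SLAVE HOSTS;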
diff --git a/mysql-test/suite/rpl/t/rpl_start_slave_deadlock_sys_vars.test b/mysql-test/suite/rpl/t/rpl_start_slave_deadlock_sys_vars.test
deleted file mode 100644
index 3eaff761108..00000000000
--- a/mysql-test/suite/rpl/t/rpl_start_slave_deadlock_sys_vars.test
+++ /dev/null
@@ -1,57 +0,0 @@
-source include/have_debug_sync.inc;
-source include/master-slave.inc;
-
---echo # connection: slave
-connection slave;
-SET @save_slave_net_timeout = @@GLOBAL.slave_net_timeout;
-STOP SLAVE;
-source include/wait_for_slave_to_stop.inc;
-
---echo # open an extra connection to the slave
-connect(slave2,127.0.0.1,root,,test,$SLAVE_MYPORT,);
---echo # connection: slave2
---echo # set debug synchronization point
-SET DEBUG_SYNC='fix_slave_net_timeout SIGNAL parked WAIT_FOR go';
---echo # attempt to set slave_net_timeout, will wait on sync point
---send SET @@GLOBAL.slave_net_timeout = 100
-
---echo # connection: slave
-connection slave;
-SET DEBUG_SYNC='now WAIT_FOR parked';
-
---echo # connection: slave1
-connection slave1;
---echo # attempt to start the SQL thread
---send START SLAVE SQL_THREAD
-
---echo # connection: slave
-connection slave;
---echo # wait until SQL thread has been started
-let $wait_condition=
- select count(*) = 1 from information_schema.processlist
- where state = "Waiting for slave thread to start" and info = "START SLAVE SQL_THREAD";
---source include/wait_condition.inc
---echo # sleep a bit so that the SQL thread THD handle is initialized
-sleep 2;
---echo # signal the set slave_net_timeout to continue
-SET DEBUG_SYNC='now SIGNAL go';
-
---echo # connection: slave2
-connection slave2;
---echo # reap result of set slave_net_timeout
---reap
-
---echo # connection: slave1
-connection slave1;
---echo # reap result of starting the SQL thread
---reap
-
---echo # disconnect: slave2
-disconnect slave2;
-
---echo # connection: slave
-connection slave;
---echo # cleanup
-SET @@GLOBAL.slave_net_timeout = @save_slave_net_timeout;
-
-source include/rpl_end.inc;
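The deleted test exercised the DEBUG_SYNC handshake: one connection parks a statement on a named sync point, a second connection waits for the park signal, performs the racing work, then releases the first. A minimal sketch of that protocol on a debug build (sync point name hypothetical):

# connection A: arm a sync point, then run the statement that hits it
SET DEBUG_SYNC= 'some_sync_point SIGNAL parked WAIT_FOR go';
--send SET @@GLOBAL.slave_net_timeout = 100

# connection B: wait until A is parked, do the racing work, release A
SET DEBUG_SYNC= 'now WAIT_FOR parked';
SET DEBUG_SYNC= 'now SIGNAL go';

# connection A: collect the result of the resumed statement
--reap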
diff --git a/mysql-test/suite/storage_engine/misc.result b/mysql-test/suite/storage_engine/misc.result
index 591f172d796..b79c78172ed 100644
--- a/mysql-test/suite/storage_engine/misc.result
+++ b/mysql-test/suite/storage_engine/misc.result
@@ -28,6 +28,9 @@ DROP EVENT ev1;
SELECT TABLE_NAME, COLUMN_NAME, REFERENCED_TABLE_NAME, REFERENCED_COLUMN_NAME
FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE ORDER BY TABLE_NAME;
TABLE_NAME COLUMN_NAME REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME
+column_stats column_name NULL NULL
+column_stats db_name NULL NULL
+column_stats table_name NULL NULL
columns_priv Column_name NULL NULL
columns_priv Db NULL NULL
columns_priv Host NULL NULL
@@ -49,6 +52,10 @@ help_topic help_topic_id NULL NULL
help_topic name NULL NULL
host Db NULL NULL
host Host NULL NULL
+index_stats db_name NULL NULL
+index_stats index_name NULL NULL
+index_stats prefix_arity NULL NULL
+index_stats table_name NULL NULL
ndb_binlog_index epoch NULL NULL
plugin name NULL NULL
proc db NULL NULL
@@ -64,6 +71,8 @@ proxies_priv Proxied_host NULL NULL
proxies_priv Proxied_user NULL NULL
proxies_priv User NULL NULL
servers Server_name NULL NULL
+table_stats db_name NULL NULL
+table_stats table_name NULL NULL
tables_priv Db NULL NULL
tables_priv Host NULL NULL
tables_priv Table_name NULL NULL
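The new mysql.column_stats, mysql.index_stats and mysql.table_stats rows registered in KEY_COLUMN_USAGE are the engine-independent statistics tables that use_stat_tables (further below) reads from; the listing above shows their primary-key columns. A quick hedged look at them (column names as I understand the 10.0 schema):

SELECT db_name, table_name, cardinality FROM mysql.table_stats;
SELECT db_name, table_name, index_name, prefix_arity FROM mysql.index_stats;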
diff --git a/mysql-test/suite/sys_vars/r/default_master_connection_basic.result b/mysql-test/suite/sys_vars/r/default_master_connection_basic.result
new file mode 100644
index 00000000000..78425049324
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/default_master_connection_basic.result
@@ -0,0 +1,94 @@
+SET @start_session_value = @@session.default_master_connection;
+SELECT @start_session_value;
+@start_session_value
+
+SET @@session.default_master_connection = 'bar';
+SET @@session.default_master_connection = DEFAULT;
+SELECT @@session.default_master_connection;
+@@session.default_master_connection
+
+SET @@session.default_master_connection = @start_session_value;
+SELECT @@session.default_master_connection = '';
+@@session.default_master_connection = ''
+1
+SET @@global.default_master_connection = 'master1';
+ERROR HY000: Variable 'default_master_connection' is a SESSION variable and can't be used with SET GLOBAL
+SELECT @@global.default_master_connection;
+ERROR HY000: Variable 'default_master_connection' is a SESSION variable
+SET @@session.default_master_connection = 'master1';
+SELECT @@session.default_master_connection;
+@@session.default_master_connection
+master1
+SET @@session.default_master_connection = '';
+SELECT @@session.default_master_connection;
+@@session.default_master_connection
+
+SET @@session.default_master_connection = '1234-5678';
+SELECT @@session.default_master_connection;
+@@session.default_master_connection
+1234-5678
+SET @@session.default_master_connection = '@!*/"';
+SELECT @@session.default_master_connection;
+@@session.default_master_connection
+@!*/"
+SET @@session.default_master_connection = REPEAT('a',191);
+SELECT @@session.default_master_connection;
+@@session.default_master_connection
+aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+SET @@session.default_master_connection = master2;
+SELECT @@session.default_master_connection;
+@@session.default_master_connection
+master2
+SET @@session.default_master_connection = 1;
+ERROR 42000: Incorrect argument type to variable 'default_master_connection'
+SET @@session.default_master_connection = 65530.30;
+ERROR 42000: Incorrect argument type to variable 'default_master_connection'
+SET @@session.default_master_connection = FALSE;
+ERROR 42000: Incorrect argument type to variable 'default_master_connection'
+SELECT @@session.default_master_connection;
+@@session.default_master_connection
+master2
+SET @@session.default_master_connection = REPEAT('a',192);
+ERROR 42000: Variable 'default_master_connection' can't be set to the value of 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
+SELECT @@session.default_master_connection;
+@@session.default_master_connection
+master2
+SET @@session.default_master_connection = NULL;
+ERROR 42000: Variable 'default_master_connection' can't be set to the value of 'NULL'
+SELECT @@session.default_master_connection;
+@@session.default_master_connection
+master2
+SELECT @@global.default_master_connection = VARIABLE_VALUE
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='default_master_connection';
+ERROR HY000: Variable 'default_master_connection' is a SESSION variable
+SELECT @@session.default_master_connection = VARIABLE_VALUE
+FROM INFORMATION_SCHEMA.SESSION_VARIABLES
+WHERE VARIABLE_NAME='default_master_connection';
+@@session.default_master_connection = VARIABLE_VALUE
+1
+SET @@default_master_connection = 'foo';
+SELECT @@default_master_connection = @@local.default_master_connection;
+@@default_master_connection = @@local.default_master_connection
+1
+SELECT @@local.default_master_connection = @@session.default_master_connection;
+@@local.default_master_connection = @@session.default_master_connection
+1
+SET default_master_connection = 'foo';
+SELECT @@default_master_connection;
+@@default_master_connection
+foo
+SET local.default_master_connection = 'foo';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'default_master_connection = 'foo'' at line 1
+SELECT local.default_master_connection;
+ERROR 42S02: Unknown table 'local' in field list
+SET session.default_master_connection = 'foo';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'default_master_connection = 'foo'' at line 1
+SELECT session.default_master_connection;
+ERROR 42S02: Unknown table 'session' in field list
+SELECT default_master_connection = @@session.default_master_connection;
+ERROR 42S22: Unknown column 'default_master_connection' in 'field list'
+SET @@session.default_master_connection = @start_session_value;
+SELECT @@session.default_master_connection;
+@@session.default_master_connection
+
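default_master_connection, introduced with multi-source replication (MDEV-253), is deliberately session-only: it names the replication connection that unqualified slave statements and status reads act on, so two sessions can administer different connections concurrently. A hedged usage sketch (connection name 'master1' hypothetical):

SET @@session.default_master_connection = 'master1';
SHOW SLAVE STATUS;     # reports the 'master1' connection
STOP SLAVE;            # stops the 'master1' slave threads
SET @@session.default_master_connection = '';   # back to the unnamed default connection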
diff --git a/mysql-test/suite/sys_vars/r/default_storage_engine_basic.result b/mysql-test/suite/sys_vars/r/default_storage_engine_basic.result
index 727e6ca88f2..7e12c7dc477 100644
--- a/mysql-test/suite/sys_vars/r/default_storage_engine_basic.result
+++ b/mysql-test/suite/sys_vars/r/default_storage_engine_basic.result
@@ -19,7 +19,7 @@ MyISAM
SET @@global.default_storage_engine = MERGE;
SELECT @@global.default_storage_engine;
@@global.default_storage_engine
-MRG_MYISAM
+MRG_MyISAM
SET @@global.default_storage_engine = MEMORY;
SELECT @@global.default_storage_engine;
@@global.default_storage_engine
@@ -36,7 +36,7 @@ MyISAM
SET @@session.default_storage_engine = MERGE;
SELECT @@session.default_storage_engine;
@@session.default_storage_engine
-MRG_MYISAM
+MRG_MyISAM
SET @@session.default_storage_engine = MEMORY;
SELECT @@session.default_storage_engine;
@@session.default_storage_engine
diff --git a/mysql-test/suite/sys_vars/r/innodb_flush_log_at_trx_commit_basic.result b/mysql-test/suite/sys_vars/r/innodb_flush_log_at_trx_commit_basic.result
index 441fb4cd362..268d40c1be3 100644
--- a/mysql-test/suite/sys_vars/r/innodb_flush_log_at_trx_commit_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_flush_log_at_trx_commit_basic.result
@@ -50,7 +50,7 @@ Warnings:
Warning 1292 Truncated incorrect innodb_flush_log_at_trx_commit value: '1001'
SELECT @@global.innodb_flush_log_at_trx_commit;
@@global.innodb_flush_log_at_trx_commit
-2
+3
'#----------------------FN_DYNVARS_046_05------------------------#'
SELECT @@global.innodb_flush_log_at_trx_commit =
VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
@@ -60,22 +60,22 @@ VARIABLE_VALUE
1
SELECT @@global.innodb_flush_log_at_trx_commit;
@@global.innodb_flush_log_at_trx_commit
-2
+3
SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
WHERE VARIABLE_NAME='innodb_flush_log_at_trx_commit';
VARIABLE_VALUE
-2
+3
'#---------------------FN_DYNVARS_046_06-------------------------#'
SET @@global.innodb_flush_log_at_trx_commit = OFF;
ERROR 42000: Incorrect argument type to variable 'innodb_flush_log_at_trx_commit'
SELECT @@global.innodb_flush_log_at_trx_commit;
@@global.innodb_flush_log_at_trx_commit
-2
+3
SET @@global.innodb_flush_log_at_trx_commit = ON;
ERROR 42000: Incorrect argument type to variable 'innodb_flush_log_at_trx_commit'
SELECT @@global.innodb_flush_log_at_trx_commit;
@@global.innodb_flush_log_at_trx_commit
-2
+3
'#---------------------FN_DYNVARS_046_07----------------------#'
SET @@global.innodb_flush_log_at_trx_commit = TRUE;
SELECT @@global.innodb_flush_log_at_trx_commit;
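The updated results show out-of-range assignments now truncating to 3 rather than 2, i.e. the variable's maximum was raised to admit MariaDB's extra mode (value 3, which per the MariaDB documentation also flushes the log at transaction prepare). A short sketch of the boundary behaviour, assuming that range:

SET @@global.innodb_flush_log_at_trx_commit = 3;      # new maximum, accepted
SET @@global.innodb_flush_log_at_trx_commit = 1001;   # truncated to 3 with a warning
SELECT @@global.innodb_flush_log_at_trx_commit;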
diff --git a/mysql-test/suite/sys_vars/r/max_relay_log_size_basic.result b/mysql-test/suite/sys_vars/r/max_relay_log_size_basic.result
index d61e9dd20b0..6025e28ccaa 100644
--- a/mysql-test/suite/sys_vars/r/max_relay_log_size_basic.result
+++ b/mysql-test/suite/sys_vars/r/max_relay_log_size_basic.result
@@ -1,7 +1,7 @@
SET @start_value = @@global.max_relay_log_size;
SELECT @start_value;
@start_value
-0
+1073741824
'#--------------------FN_DYNVARS_082_01------------------------#'
SET @@global.max_relay_log_size = 5000;
Warnings:
@@ -9,7 +9,7 @@ Warning 1292 Truncated incorrect max_relay_log_size value: '5000'
SET @@global.max_relay_log_size = DEFAULT;
SELECT @@global.max_relay_log_size;
@@global.max_relay_log_size
-0
+1073741824
'#---------------------FN_DYNVARS_082_02-------------------------#'
SET @@global.max_relay_log_size = @start_value;
SELECT @@global.max_relay_log_size = 1024;
@@ -17,15 +17,17 @@ SELECT @@global.max_relay_log_size = 1024;
0
'#--------------------FN_DYNVARS_082_03------------------------#'
SET @@global.max_relay_log_size = 0;
+Warnings:
+Warning 1292 Truncated incorrect max_relay_log_size value: '0'
SELECT @@global.max_relay_log_size;
@@global.max_relay_log_size
-0
+4096
SET @@global.max_relay_log_size = 1;
Warnings:
Warning 1292 Truncated incorrect max_relay_log_size value: '1'
SELECT @@global.max_relay_log_size;
@@global.max_relay_log_size
-0
+4096
SET @@global.max_relay_log_size = 1073741824;
SELECT @@global.max_relay_log_size;
@@global.max_relay_log_size
@@ -48,7 +50,7 @@ Warnings:
Warning 1292 Truncated incorrect max_relay_log_size value: '-1'
SELECT @@global.max_relay_log_size;
@@global.max_relay_log_size
-0
+4096
SET @@global.max_relay_log_size = 100000000000;
Warnings:
Warning 1292 Truncated incorrect max_relay_log_size value: '100000000000'
@@ -65,7 +67,7 @@ Warnings:
Warning 1292 Truncated incorrect max_relay_log_size value: '-1024'
SELECT @@global.max_relay_log_size;
@@global.max_relay_log_size
-0
+4096
SET @@global.max_relay_log_size = 1073741825;
Warnings:
Warning 1292 Truncated incorrect max_relay_log_size value: '1073741825'
@@ -90,9 +92,9 @@ SELECT @@global.max_relay_log_size;
1073741824
'#-------------------FN_DYNVARS_082_05----------------------------#'
SET @@session.max_relay_log_size = 4096;
-ERROR HY000: Variable 'max_relay_log_size' is a GLOBAL variable and should be set with SET GLOBAL
SELECT @@session.max_relay_log_size;
-ERROR HY000: Variable 'max_relay_log_size' is a GLOBAL variable
+@@session.max_relay_log_size
+4096
'#----------------------FN_DYNVARS_082_06------------------------#'
SELECT @@global.max_relay_log_size = VARIABLE_VALUE
FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
@@ -110,11 +112,13 @@ Warnings:
Warning 1292 Truncated incorrect max_relay_log_size value: '1'
SELECT @@global.max_relay_log_size;
@@global.max_relay_log_size
-0
+4096
SET @@global.max_relay_log_size = FALSE;
+Warnings:
+Warning 1292 Truncated incorrect max_relay_log_size value: '0'
SELECT @@global.max_relay_log_size;
@@global.max_relay_log_size
-0
+4096
'#---------------------FN_DYNVARS_082_08----------------------#'
SET @@global.max_relay_log_size = 5000;
Warnings:
@@ -124,7 +128,8 @@ SELECT @@max_relay_log_size = @@global.max_relay_log_size;
1
'#---------------------FN_DYNVARS_082_09----------------------#'
SET max_relay_log_size = 6000;
-ERROR HY000: Variable 'max_relay_log_size' is a GLOBAL variable and should be set with SET GLOBAL
+Warnings:
+Warning 1292 Truncated incorrect max_relay_log_size value: '6000'
SELECT @@max_relay_log_size;
@@max_relay_log_size
4096
@@ -141,4 +146,4 @@ ERROR 42S22: Unknown column 'max_relay_log_size' in 'field list'
SET @@global.max_relay_log_size = @start_value;
SELECT @@global.max_relay_log_size;
@@global.max_relay_log_size
-0
+1073741824
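Three behaviour changes are visible in this result: the default is now 1073741824, matching max_binlog_size's default instead of the old 0 (which meant "use max_binlog_size"); values below the new 4096 floor are rounded up with a truncation warning; and the variable now accepts session scope, as needed per replication connection under MDEV-253. In brief:

SET @@global.max_relay_log_size = 0;        # truncated up to the 4096 minimum
SET @@session.max_relay_log_size = 4096;    # session scope is now legal
SELECT @@session.max_relay_log_size;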
diff --git a/mysql-test/suite/sys_vars/r/sql_slave_skip_counter_basic.result b/mysql-test/suite/sys_vars/r/sql_slave_skip_counter_basic.result
index e6d9aff7141..0e1d7af5485 100644
--- a/mysql-test/suite/sys_vars/r/sql_slave_skip_counter_basic.result
+++ b/mysql-test/suite/sys_vars/r/sql_slave_skip_counter_basic.result
@@ -35,9 +35,6 @@ VARIABLE_VALUE
1024
'#--------------------FN_DYNVARS_165_03-------------------------#'
SET @@sql_slave_skip_counter = 10;
-ERROR HY000: Variable 'sql_slave_skip_counter' is a GLOBAL variable and should be set with SET GLOBAL
SET @@session.sql_slave_skip_counter = 12;
-ERROR HY000: Variable 'sql_slave_skip_counter' is a GLOBAL variable and should be set with SET GLOBAL
SET @@local.sql_slave_skip_counter = 13;
-ERROR HY000: Variable 'sql_slave_skip_counter' is a GLOBAL variable and should be set with SET GLOBAL
SET @@global.sql_slave_skip_counter = 0;
diff --git a/mysql-test/suite/sys_vars/r/storage_engine_basic.result b/mysql-test/suite/sys_vars/r/storage_engine_basic.result
index 2831ebaa500..9461707dd79 100644
--- a/mysql-test/suite/sys_vars/r/storage_engine_basic.result
+++ b/mysql-test/suite/sys_vars/r/storage_engine_basic.result
@@ -19,7 +19,7 @@ MyISAM
SET @@global.storage_engine = MERGE;
SELECT @@global.storage_engine;
@@global.storage_engine
-MRG_MYISAM
+MRG_MyISAM
SET @@global.storage_engine = MEMORY;
SELECT @@global.storage_engine;
@@global.storage_engine
@@ -36,7 +36,7 @@ MyISAM
SET @@session.storage_engine = MERGE;
SELECT @@session.storage_engine;
@@session.storage_engine
-MRG_MYISAM
+MRG_MyISAM
SET @@session.storage_engine = MEMORY;
SELECT @@session.storage_engine;
@@session.storage_engine
diff --git a/mysql-test/suite/sys_vars/r/use_stat_tables_basic.result b/mysql-test/suite/sys_vars/r/use_stat_tables_basic.result
new file mode 100644
index 00000000000..64f6d868fa6
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/use_stat_tables_basic.result
@@ -0,0 +1,95 @@
+SET @start_global_value = @@global.use_stat_tables;
+SELECT @start_global_value;
+@start_global_value
+NEVER
+SET @start_session_value = @@session.use_stat_tables;
+SELECT @start_session_value;
+@start_session_value
+NEVER
+SET @@global.use_stat_tables = 2;
+SET @@global.use_stat_tables = DEFAULT;
+SELECT @@global.use_stat_tables;
+@@global.use_stat_tables
+NEVER
+SET @@global.use_stat_tables = 0;
+SELECT @@global.use_stat_tables;
+@@global.use_stat_tables
+NEVER
+SET @@global.use_stat_tables = 1;
+SELECT @@global.use_stat_tables;
+@@global.use_stat_tables
+COMPLEMENTARY
+SET @@global.use_stat_tables = 2;
+SELECT @@global.use_stat_tables;
+@@global.use_stat_tables
+PREFERABLY
+SET @@global.use_stat_tables = NEVER;
+SELECT @@global.use_stat_tables;
+@@global.use_stat_tables
+NEVER
+SET @@global.use_stat_tables = COMPLEMENTARY;
+SELECT @@global.use_stat_tables;
+@@global.use_stat_tables
+COMPLEMENTARY
+SET @@global.use_stat_tables = PREFERABLY;
+SELECT @@global.use_stat_tables;
+@@global.use_stat_tables
+PREFERABLY
+SET @@session.use_stat_tables = 0;
+SELECT @@session.use_stat_tables;
+@@session.use_stat_tables
+NEVER
+SET @@session.use_stat_tables = 1;
+SELECT @@session.use_stat_tables;
+@@session.use_stat_tables
+COMPLEMENTARY
+SET @@session.use_stat_tables = 2;
+SELECT @@session.use_stat_tables;
+@@session.use_stat_tables
+PREFERABLY
+SET @@session.use_stat_tables = NEVER;
+SELECT @@session.use_stat_tables;
+@@session.use_stat_tables
+NEVER
+SET @@session.use_stat_tables = PREFERABLY;
+SELECT @@session.use_stat_tables;
+@@session.use_stat_tables
+PREFERABLY
+SET @@session.use_stat_tables = COMPLEMENTARY;
+SELECT @@session.use_stat_tables;
+@@session.use_stat_tables
+COMPLEMENTARY
+set sql_mode=TRADITIONAL;
+SET @@global.use_stat_tables = 10;
+ERROR 42000: Variable 'use_stat_tables' can't be set to the value of '10'
+SET @@global.use_stat_tables = -1024;
+ERROR 42000: Variable 'use_stat_tables' can't be set to the value of '-1024'
+SET @@global.use_stat_tables = 2.4;
+ERROR 42000: Incorrect argument type to variable 'use_stat_tables'
+SET @@global.use_stat_tables = OFF;
+ERROR 42000: Variable 'use_stat_tables' can't be set to the value of 'OFF'
+SET @@session.use_stat_tables = 10;
+ERROR 42000: Variable 'use_stat_tables' can't be set to the value of '10'
+SET @@session.use_stat_tables = -2;
+ERROR 42000: Variable 'use_stat_tables' can't be set to the value of '-2'
+SET @@session.use_stat_tables = 1.2;
+ERROR 42000: Incorrect argument type to variable 'use_stat_tables'
+SET @@session.use_stat_tables = ON;
+ERROR 42000: Variable 'use_stat_tables' can't be set to the value of 'ON'
+SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='use_stat_tables';
+VARIABLE_NAME VARIABLE_VALUE
+USE_STAT_TABLES PREFERABLY
+SELECT * FROM INFORMATION_SCHEMA.SESSION_VARIABLES
+WHERE VARIABLE_NAME='use_stat_tables';
+VARIABLE_NAME VARIABLE_VALUE
+USE_STAT_TABLES COMPLEMENTARY
+SET @@global.use_stat_tables = @start_global_value;
+SELECT @@global.use_stat_tables;
+@@global.use_stat_tables
+NEVER
+SET @@session.use_stat_tables = @start_session_value;
+SELECT @@session.use_stat_tables;
+@@session.use_stat_tables
+NEVER
+set sql_mode='';
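use_stat_tables is a three-value enum whose numeric and symbolic spellings the result above pins down: 0 = NEVER, 1 = COMPLEMENTARY, 2 = PREFERABLY, with anything else (10, -1024, 2.4, OFF/ON) rejected. For instance:

SET @@global.use_stat_tables = 2;          # same as PREFERABLY
SELECT @@global.use_stat_tables;           # -> PREFERABLY
SET @@session.use_stat_tables = NEVER;     # symbolic form, session scope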
diff --git a/mysql-test/suite/sys_vars/t/default_master_connection_basic.test b/mysql-test/suite/sys_vars/t/default_master_connection_basic.test
new file mode 100644
index 00000000000..ba73e5ed4dd
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/default_master_connection_basic.test
@@ -0,0 +1,129 @@
+############## mysql-test/suite/sys_vars/t/default_master_connection_basic.test ##############
+#
+# Implemented in the scope of MDEV-253
+# The variable is SESSION-only
+#
+
+--source include/load_sysvars.inc
+--source include/not_embedded.inc
+
+#############################################################
+# Save initial value #
+#############################################################
+
+SET @start_session_value = @@session.default_master_connection;
+SELECT @start_session_value;
+
+###################################################################
+# Display the DEFAULT value of default_master_connection #
+###################################################################
+
+SET @@session.default_master_connection = 'bar';
+SET @@session.default_master_connection = DEFAULT;
+SELECT @@session.default_master_connection;
+
+###################################################################
+# Check the DEFAULT value of default_master_connection #
+###################################################################
+
+SET @@session.default_master_connection = @start_session_value;
+SELECT @@session.default_master_connection = '';
+
+#################################################
+# Check that the GLOBAL scope is not applicable #
+#################################################
+
+--error ER_LOCAL_VARIABLE
+SET @@global.default_master_connection = 'master1';
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SELECT @@global.default_master_connection;
+
+####################################################################################
+# Change the value of default_master_connection to a valid value for SESSION Scope #
+####################################################################################
+
+SET @@session.default_master_connection = 'master1';
+SELECT @@session.default_master_connection;
+SET @@session.default_master_connection = '';
+SELECT @@session.default_master_connection;
+SET @@session.default_master_connection = '1234-5678';
+SELECT @@session.default_master_connection;
+SET @@session.default_master_connection = '@!*/"';
+SELECT @@session.default_master_connection;
+SET @@session.default_master_connection = REPEAT('a',191);
+SELECT @@session.default_master_connection;
+SET @@session.default_master_connection = master2;
+SELECT @@session.default_master_connection;
+
+
+#####################################################################
+# Change the value of default_master_connection to an invalid value #
+#####################################################################
+
+--error ER_WRONG_TYPE_FOR_VAR
+SET @@session.default_master_connection = 1;
+--error ER_WRONG_TYPE_FOR_VAR
+SET @@session.default_master_connection = 65530.30;
+--error ER_WRONG_TYPE_FOR_VAR
+SET @@session.default_master_connection = FALSE;
+SELECT @@session.default_master_connection;
+--error ER_WRONG_VALUE_FOR_VAR
+SET @@session.default_master_connection = REPEAT('a',192);
+SELECT @@session.default_master_connection;
+--error ER_WRONG_VALUE_FOR_VAR
+SET @@session.default_master_connection = NULL;
+SELECT @@session.default_master_connection;
+
+###############################################################################
+# Check if the value in GLOBAL & SESSION Tables matches value in variable #
+###############################################################################
+
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SELECT @@global.default_master_connection = VARIABLE_VALUE
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='default_master_connection';
+
+SELECT @@session.default_master_connection = VARIABLE_VALUE
+FROM INFORMATION_SCHEMA.SESSION_VARIABLES
+WHERE VARIABLE_NAME='default_master_connection';
+
+
+########################################################################################################
+# Check if accessing variable with SESSION,LOCAL and without SCOPE points to same session variable #
+########################################################################################################
+
+SET @@default_master_connection = 'foo';
+SELECT @@default_master_connection = @@local.default_master_connection;
+SELECT @@local.default_master_connection = @@session.default_master_connection;
+
+
+###################################################################################
+# Check if default_master_connection can be accessed with and without @@ sign #
+###################################################################################
+
+SET default_master_connection = 'foo';
+SELECT @@default_master_connection;
+--Error ER_PARSE_ERROR
+SET local.default_master_connection = 'foo';
+--Error ER_UNKNOWN_TABLE
+SELECT local.default_master_connection;
+--Error ER_PARSE_ERROR
+SET session.default_master_connection = 'foo';
+--Error ER_UNKNOWN_TABLE
+SELECT session.default_master_connection;
+--Error ER_BAD_FIELD_ERROR
+SELECT default_master_connection = @@session.default_master_connection;
+
+
+####################################
+# Restore initial value #
+####################################
+
+SET @@session.default_master_connection = @start_session_value;
+SELECT @@session.default_master_connection;
+
+
+#############################################################
+# END OF default_master_connection TESTS #
+#############################################################
+
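Beyond the scope checks, the test pins the accepted value space: any string up to 191 characters is a legal connection name (including '', '1234-5678' and punctuation), 192 characters or NULL is rejected with ER_WRONG_VALUE_FOR_VAR, and non-string literals fail with ER_WRONG_TYPE_FOR_VAR. The boundary in brief:

SET @@session.default_master_connection = REPEAT('a',191);   # longest accepted name
--error ER_WRONG_VALUE_FOR_VAR
SET @@session.default_master_connection = REPEAT('a',192);   # one character too long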
diff --git a/mysql-test/suite/sys_vars/t/max_relay_log_size_basic.test b/mysql-test/suite/sys_vars/t/max_relay_log_size_basic.test
index e39778baca8..05d0fd1be82 100644
--- a/mysql-test/suite/sys_vars/t/max_relay_log_size_basic.test
+++ b/mysql-test/suite/sys_vars/t/max_relay_log_size_basic.test
@@ -24,7 +24,7 @@
###############################################################################
--source include/load_sysvars.inc
-
+--source include/not_embedded.inc
###################################################################
# START OF max_relay_log_size TESTS #
@@ -105,9 +105,7 @@ SELECT @@global.max_relay_log_size;
# Test if accessing session max_relay_log_size gives error #
########################################################################
---Error ER_GLOBAL_VARIABLE
SET @@session.max_relay_log_size = 4096;
---Error ER_INCORRECT_GLOBAL_LOCAL_VAR
SELECT @@session.max_relay_log_size;
@@ -150,7 +148,6 @@ SELECT @@max_relay_log_size = @@global.max_relay_log_size;
# Check if max_relay_log_size can be accessed with and without @@ sign #
#############################################################################
---Error ER_GLOBAL_VARIABLE
SET max_relay_log_size = 6000;
SELECT @@max_relay_log_size;
--Error ER_PARSE_ERROR
diff --git a/mysql-test/suite/sys_vars/t/sql_slave_skip_counter_basic.test b/mysql-test/suite/sys_vars/t/sql_slave_skip_counter_basic.test
index 10ca47133b7..e1ea74f33c3 100644
--- a/mysql-test/suite/sys_vars/t/sql_slave_skip_counter_basic.test
+++ b/mysql-test/suite/sys_vars/t/sql_slave_skip_counter_basic.test
@@ -22,6 +22,11 @@
# server-system-variables.html #
# #
###############################################################################
+# #
+# Modification date: 2012-09-30 #
+# With the implementation of MDEV-253, the variable also has session scope   #
+# #
+###############################################################################
--source include/not_embedded.inc
--source include/load_sysvars.inc
@@ -88,11 +93,8 @@ WHERE VARIABLE_NAME='sql_slave_skip_counter';
# Checking if variable is accessible with session scope #
###################################################################
---Error ER_GLOBAL_VARIABLE
SET @@sql_slave_skip_counter = 10;
---Error ER_GLOBAL_VARIABLE
SET @@session.sql_slave_skip_counter = 12;
---Error ER_GLOBAL_VARIABLE
SET @@local.sql_slave_skip_counter = 13;
SET @@global.sql_slave_skip_counter = 0;
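With MDEV-253, sql_slave_skip_counter gained session scope, so the three former ER_GLOBAL_VARIABLE expectations are dropped; under the MariaDB multi-source design the session value applies to the connection currently selected by default_master_connection. A hedged sketch of that interplay (connection name hypothetical):

SET @@default_master_connection = 'master1';
SET @@session.sql_slave_skip_counter = 1;    # skips one event on 'master1'
START SLAVE 'master1';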
diff --git a/mysql-test/suite/sys_vars/t/use_stat_tables_basic.test b/mysql-test/suite/sys_vars/t/use_stat_tables_basic.test
new file mode 100644
index 00000000000..7f526edf206
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/use_stat_tables_basic.test
@@ -0,0 +1,100 @@
+--source include/load_sysvars.inc
+
+#############################################################
+# Save initial value #
+#############################################################
+
+SET @start_global_value = @@global.use_stat_tables;
+SELECT @start_global_value;
+SET @start_session_value = @@session.use_stat_tables;
+SELECT @start_session_value;
+
+###############################################################
+# Display the DEFAULT value of use_stat_tables #
+###############################################################
+
+SET @@global.use_stat_tables = 2;
+SET @@global.use_stat_tables = DEFAULT;
+SELECT @@global.use_stat_tables;
+
+
+##################################################################################
+# Change the value of use_stat_tables to a valid value for GLOBAL Scope #
+##################################################################################
+
+SET @@global.use_stat_tables = 0;
+SELECT @@global.use_stat_tables;
+SET @@global.use_stat_tables = 1;
+SELECT @@global.use_stat_tables;
+SET @@global.use_stat_tables = 2;
+SELECT @@global.use_stat_tables;
+
+SET @@global.use_stat_tables = NEVER;
+SELECT @@global.use_stat_tables;
+SET @@global.use_stat_tables = COMPLEMENTARY;
+SELECT @@global.use_stat_tables;
+SET @@global.use_stat_tables = PREFERABLY;
+SELECT @@global.use_stat_tables;
+
+####################################################################################
+# Change the value of use_stat_tables to a valid value for SESSION Scope #
+####################################################################################
+
+SET @@session.use_stat_tables = 0;
+SELECT @@session.use_stat_tables;
+SET @@session.use_stat_tables = 1;
+SELECT @@session.use_stat_tables;
+SET @@session.use_stat_tables = 2;
+SELECT @@session.use_stat_tables;
+
+SET @@session.use_stat_tables = NEVER;
+SELECT @@session.use_stat_tables;
+SET @@session.use_stat_tables = PREFERABLY;
+SELECT @@session.use_stat_tables;
+SET @@session.use_stat_tables = COMPLEMENTARY;
+SELECT @@session.use_stat_tables;
+
+#####################################################################
+# Change the value of use_stat_tables to an invalid value #
+#####################################################################
+set sql_mode=TRADITIONAL;
+--Error ER_WRONG_VALUE_FOR_VAR
+SET @@global.use_stat_tables = 10;
+--Error ER_WRONG_VALUE_FOR_VAR
+SET @@global.use_stat_tables = -1024;
+--Error ER_WRONG_TYPE_FOR_VAR
+SET @@global.use_stat_tables = 2.4;
+--Error ER_WRONG_VALUE_FOR_VAR
+SET @@global.use_stat_tables = OFF;
+--Error ER_WRONG_VALUE_FOR_VAR
+SET @@session.use_stat_tables = 10;
+--Error ER_WRONG_VALUE_FOR_VAR
+SET @@session.use_stat_tables = -2;
+--Error ER_WRONG_TYPE_FOR_VAR
+SET @@session.use_stat_tables = 1.2;
+--Error ER_WRONG_VALUE_FOR_VAR
+SET @@session.use_stat_tables = ON;
+
+###############################################################################
+# Check if the value in GLOBAL & SESSION Tables matches value in variable #
+###############################################################################
+
+SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='use_stat_tables';
+
+SELECT * FROM INFORMATION_SCHEMA.SESSION_VARIABLES
+WHERE VARIABLE_NAME='use_stat_tables';
+
+####################################
+# Restore initial value #
+####################################
+
+SET @@global.use_stat_tables = @start_global_value;
+SELECT @@global.use_stat_tables;
+SET @@session.use_stat_tables = @start_session_value;
+SELECT @@session.use_stat_tables;
+set sql_mode='';
+
+######################################################
+# END OF use_stat_tables TESTS #
+###################################################### \ No newline at end of file
diff --git a/mysql-test/suite/vcol/r/vcol_merge.result b/mysql-test/suite/vcol/r/vcol_merge.result
index 4b5ed838c3a..e127ec35e8c 100644
--- a/mysql-test/suite/vcol/r/vcol_merge.result
+++ b/mysql-test/suite/vcol/r/vcol_merge.result
@@ -4,5 +4,5 @@ create table t2 (a int, b int as (a % 10));
insert into t1 values (1,default);
insert into t2 values (2,default);
create table t3 (a int, b int as (a % 10)) engine=MERGE UNION=(t1,t2);
-ERROR HY000: MRG_MYISAM storage engine does not support computed columns
+ERROR HY000: MRG_MyISAM storage engine does not support computed columns
drop table t1,t2;
diff --git a/mysql-test/t/alter_table_mdev539_maria.test b/mysql-test/t/alter_table_mdev539_maria.test
new file mode 100644
index 00000000000..7e01bc3be84
--- /dev/null
+++ b/mysql-test/t/alter_table_mdev539_maria.test
@@ -0,0 +1,7 @@
+
+--echo #
+set @@storage_engine= Aria;
+
+--source include/alter_table_mdev539.inc
+
+set @@storage_engine= default;
diff --git a/mysql-test/t/alter_table_mdev539_myisam.test b/mysql-test/t/alter_table_mdev539_myisam.test
new file mode 100644
index 00000000000..0a5669088bf
--- /dev/null
+++ b/mysql-test/t/alter_table_mdev539_myisam.test
@@ -0,0 +1,7 @@
+
+--echo #
+set @@storage_engine= MyISAM;
+
+--source include/alter_table_mdev539.inc
+
+set @@storage_engine= default;
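The two new tests are thin per-engine wrappers around the shared include, the usual mtr pattern for running one scenario against several storage engines: set the default engine, source the include, restore the default. The same shape works with any other engine (include name hypothetical):

set @@storage_engine= MEMORY;
--source include/some_shared_scenario.inc
set @@storage_engine= default;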
diff --git a/mysql-test/t/cassandra.test b/mysql-test/t/cassandra.test
new file mode 100644
index 00000000000..2b92956d974
--- /dev/null
+++ b/mysql-test/t/cassandra.test
@@ -0,0 +1,719 @@
+#
+# Tests for cassandra storage engine
+#
+--source include/have_cassandra.inc
+
+--disable_warnings
+drop table if exists t0, t1;
+--enable_warnings
+
+# Test various errors on table creation.
+--error ER_REQUIRES_PRIMARY_KEY
+create table t1 (a int) engine=cassandra
+ thrift_host='localhost' keyspace='foo' column_family='colfam';
+
+--error ER_CONNECT_TO_FOREIGN_DATA_SOURCE
+create table t1 (a int primary key, b int) engine=cassandra
+ thrift_host='localhost' keyspace='foo' column_family='colfam';
+
+--error ER_CONNECT_TO_FOREIGN_DATA_SOURCE
+create table t1 (rowkey char(10) primary key, column1 char(10)) engine=cassandra
+ thrift_host='127.0.0.2' keyspace='foo' column_family='colfam';
+
+--error ER_CONNECT_TO_FOREIGN_DATA_SOURCE
+create table t1 (rowkey char(10) primary key, column1 char(10)) engine=cassandra
+ thrift_host='localhost' keyspace='no_such_keyspace' column_family='colfam';
+
+# No column family specified
+--error ER_CONNECT_TO_FOREIGN_DATA_SOURCE
+create table t1 (rowkey char(10) primary key, column1 char(10)) engine=cassandra
+ thrift_host='localhost' keyspace='no_such_keyspace';
+
+############################################################################
+## Cassandra initialization
+############################################################################
+
+# Step 1: remove the keyspace that could be left over from the previous test
+--remove_files_wildcard $MYSQLTEST_VARDIR cassandra_test_cleanup.cql
+--write_file $MYSQLTEST_VARDIR/cassandra_test_cleanup.cql
+drop keyspace mariadbtest2;
+EOF
+--error 0,1,2
+--system cqlsh -3 -f $MYSQLTEST_VARDIR/cassandra_test_cleanup.cql
+
+# Step 2: create new keyspace and test column families
+--remove_files_wildcard $MYSQLTEST_VARDIR cassandra_test_init.cql
+--write_file $MYSQLTEST_VARDIR/cassandra_test_init.cql
+
+CREATE KEYSPACE mariadbtest2
+ WITH strategy_class = 'org.apache.cassandra.locator.SimpleStrategy'
+ AND strategy_options:replication_factor='1';
+
+USE mariadbtest2;
+create columnfamily cf1 ( pk varchar primary key, data1 varchar, data2 bigint);
+
+create columnfamily cf2 (rowkey bigint primary key, a bigint);
+
+create columnfamily cf3 (rowkey bigint primary key, intcol int);
+
+create columnfamily cf4 (rowkey bigint primary key, datecol timestamp);
+
+create columnfamily cf5 (rowkey bigint primary key, uuidcol uuid);
+
+create columnfamily cf6 (rowkey uuid primary key, col1 int);
+
+create columnfamily cf7 (rowkey int primary key, boolcol boolean);
+
+create columnfamily cf8 (rowkey varchar primary key, countercol counter);
+update cf8 set countercol=countercol+1 where rowkey='cnt1';
+update cf8 set countercol=countercol+100 where rowkey='cnt2';
+
+create columnfamily cf9 (rowkey varchar primary key, varint_col varint);
+insert into cf9 (rowkey, varint_col) values ('val-01', 1);
+insert into cf9 (rowkey, varint_col) values ('val-0x123456', 1193046);
+insert into cf9 (rowkey, varint_col) values ('val-0x12345678', 305419896);
+
+create columnfamily cf11 (rowkey varchar primary key, decimal_col decimal);
+insert into cf11 (rowkey, decimal_col) values ('val_0.5', 0.5);
+insert into cf11 (rowkey, decimal_col) values ('val_1.5', 1.5);
+insert into cf11 (rowkey, decimal_col) values ('val_1234', 1234);
+
+create columnfamily cf12 (rowkey varchar primary key, decimal_col decimal);
+
+EOF
+--error 0,1,2
+--system cqlsh -3 -f $MYSQLTEST_VARDIR/cassandra_test_init.cql
+
+
+# Step 3: Cassandra's CQL doesn't allow certain kinds of queries. Run them in
+# the CLI instead.
+--remove_files_wildcard $MYSQLTEST_VARDIR cassandra_test_init.cli
+--write_file $MYSQLTEST_VARDIR/cassandra_test_init.cli
+use mariadbtest2;
+CREATE COLUMN FAMILY cf10
+ WITH comparator = UTF8Type
+ AND key_validation_class=UTF8Type
+ AND default_validation_class = UTF8Type;
+
+CREATE COLUMN FAMILY cfd1
+ WITH comparator = UTF8Type
+ AND key_validation_class=UTF8Type
+ AND default_validation_class = UTF8Type;
+SET cfd1['1']['very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very
_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very
_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very
_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very
_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very
_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very
_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very
_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_very_long_name']='1';
+
+CREATE COLUMN FAMILY cfd2
+ WITH comparator = UTF8Type
+ AND key_validation_class=Int32Type
+ AND default_validation_class = UTF8Type;
+
+EOF
+
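+# Exit codes 1 and 2 are tolerated below so that setup does not abort the
+# test when cassandra-cli is unavailable or the schema already exists.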
+--error 0,1,2
+--system cassandra-cli -f $MYSQLTEST_VARDIR/cassandra_test_init.cli
+
+############################################################################
+## Cassandra initialization ends
+############################################################################
+
+--echo # Now, create a table for real and insert data
+create table t1 (pk varchar(36) primary key, data1 varchar(60), data2 bigint) engine=cassandra
+ thrift_host='localhost' keyspace='mariadbtest2' column_family='cf1';
+
+--echo # Just in case there are left-overs from a previous run:
+delete from t1;
+select * from t1;
+
+insert into t1 values ('rowkey10', 'data1-value', 123456);
+insert into t1 values ('rowkey11', 'data1-value2', 34543);
+insert into t1 values ('rowkey12', 'data1-value3', 454);
+select * from t1;
+
+explain
+select * from t1 where pk='rowkey11';
+select * from t1 where pk='rowkey11';
+
+# Deletion works weirdly: it sets all columns to NULL.
+# But if I do this in cassandra-cli:
+#
+# del cf1[ascii('rowkey10')]
+#
+# Subsequent 'list cf1' command also gives
+#
+# RowKey: rowkey10
+#
+# without any columns.
+#
+# CQL seems to simply ignore all "incomplete" records.
+
+delete from t1 where pk='rowkey11';
+select * from t1;
+
+delete from t1;
+select * from t1;
+
+--echo #
+--echo # A query with filesort (check that table_flags() has HA_REC_NOT_IN_SEQ set;
+--echo # also check ::rnd_pos())
+--echo #
+insert into t1 values ('rowkey10', 'data1-value', 123456);
+insert into t1 values ('rowkey11', 'data1-value2', 34543);
+insert into t1 values ('rowkey12', 'data1-value3', 454);
+select * from t1 order by data2;
+
+delete from t1;
+drop table t1;
+
+--echo #
+--echo # MDEV-476: Cassandra: Server crashes in calculate_key_len on DELETE with ORDER BY
+--echo #
+CREATE TABLE t1 (rowkey BIGINT PRIMARY KEY, a BIGINT) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf2';
+
+INSERT INTO t1 VALUES (1,1),(2,2);
+DELETE FROM t1 ORDER BY a LIMIT 1;
+
+DROP TABLE t1;
+
+--echo #
+--echo # Batched INSERT
+--echo #
+show variables like 'cassandra_insert_batch_size';
+show status like 'cassandra_row_insert%';
+CREATE TABLE t1 (rowkey BIGINT PRIMARY KEY, a BIGINT) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf2';
+
+delete from t1;
+INSERT INTO t1 VALUES (1,1),(2,2);
+DELETE FROM t1 ORDER BY a LIMIT 1;
+
+DROP TABLE t1;
+show status like 'cassandra_row_insert%';
+
+--echo # FLUSH STATUS doesn't work for our variables, just like with InnoDB.
+flush status;
+show status like 'cassandra_row_insert%';
+
+--echo #
+--echo # Batched Key Access
+--echo #
+
+--echo # Control variable (we are not yet able to make use of MRR's buffer)
+show variables like 'cassandra_multi%';
+
+--echo # MRR-related status variables:
+show status like 'cassandra_multi%';
+
+CREATE TABLE t1 (rowkey BIGINT PRIMARY KEY, a BIGINT) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf2';
+delete from t1;
+INSERT INTO t1 VALUES (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
+
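+# join_cache_level=8 enables Batched Key Access; the join below should then
+# fetch B's rows through the MRR interface and increase the
+# cassandra_multi% counters.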
+set @tmp_jcl=@@join_cache_level;
+set join_cache_level=8;
+explain select * from t1 A, t1 B where B.rowkey=A.a;
+
+select * from t1 A, t1 B where B.rowkey=A.a;
+show status like 'cassandra_multi%';
+
+# The following INSERTs are really UPDATEs
+insert into t1 values(1, 8);
+insert into t1 values(3, 8);
+insert into t1 values(5, 8);
+insert into t1 values(7, 8);
+
+select * from t1 A, t1 B where B.rowkey=A.a;
+show status like 'cassandra_multi%';
+
+delete from t1;
+drop table t1;
+
+--echo #
+--echo # MDEV-480: TRUNCATE TABLE on a Cassandra table does not remove rows
+--echo #
+CREATE TABLE t1 (rowkey BIGINT PRIMARY KEY, a BIGINT) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf2';
+INSERT INTO t1 VALUES (0,0),(1,1),(2,2);
+truncate table t1;
+select * from t1;
+drop table t1;
+
+--echo #
+--echo # MDEV-494, part #1: phantom row for big full-scan selects
+--echo #
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+CREATE TABLE t1 (rowkey BIGINT PRIMARY KEY, a BIGINT) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf2';
+
+insert into t1 select A.a + 10 * B.a + 100*C.a, 12345 from t0 A, t0 B, t0 C;
+
+select count(*) from t1;
+select count(*) from t1 where a=12345;
+
+delete from t1;
+drop table t1;
+drop table t0;
+
+--echo # 32-bit INT type support
+CREATE TABLE t1 (rowkey BIGINT PRIMARY KEY, intcol INT) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf3';
+insert into t1 values (10,10);
+insert into t1 values (12,12);
+delete from t1;
+drop table t1;
+
+--echo #
+--echo # Try accessing a column family without explicitly defined columns
+--echo #
+--error ER_INTERNAL_ERROR
+CREATE TABLE t1 (my_primary_key varchar(10) PRIMARY KEY) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf10';
+
+CREATE TABLE t1 (rowkey varchar(10) PRIMARY KEY) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf10';
+
+DROP TABLE t1;
+
+--echo #
+--echo # Timestamp datatype support
+--echo #
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, datecol timestamp) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf4';
+
+delete from t2;
+insert into t2 values (1, '2012-08-29 01:23:45');
+select * from t2;
+delete from t2;
+
+--echo # MDEV-498: Cassandra: Inserting a timestamp does not work on a 32-bit system
+INSERT INTO t2 VALUES (10,'2012-12-12 12:12:12');
+SELECT * FROM t2;
+delete from t2;
+
+--echo #
+--echo # (no MDEV#) Check that insert counters work correctly
+--echo #
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
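+
+# Snapshot the counters first; the queries below print only deltas (inside
+# --disable_query_log) so the output is stable regardless of prior activity.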
+let $start_inserts=`select variable_value from information_schema.SESSION_STATUS
+ where variable_name ='Cassandra_row_inserts'`;
+let $start_insert_batches=`select variable_value from information_schema.SESSION_STATUS
+ where variable_name ='Cassandra_row_insert_batches'`;
+
+set cassandra_insert_batch_size=10;
+insert into t2 select A.a+10*B.a, now() from t0 A, t0 B;
+
+--disable_query_log
+eval select
+ (select variable_value - $start_inserts from information_schema.SESSION_STATUS
+ where variable_name ='Cassandra_row_inserts')
+ AS 'inserts',
+ (select variable_value - $start_insert_batches from information_schema.SESSION_STATUS
+ where variable_name ='Cassandra_row_insert_batches')
+ AS 'insert_batches';
+--enable_query_log
+
+let $start_inserts=`select variable_value from information_schema.SESSION_STATUS
+ where variable_name ='Cassandra_row_inserts'`;
+let $start_insert_batches=`select variable_value from information_schema.SESSION_STATUS
+ where variable_name ='Cassandra_row_insert_batches'`;
+
+set cassandra_insert_batch_size=1;
+insert into t2 select A.a+10*B.a+100, now() from t0 A, t0 B;
+
+--disable_query_log
+eval select
+ (select variable_value - $start_inserts from information_schema.SESSION_STATUS
+ where variable_name ='Cassandra_row_inserts')
+ AS 'inserts',
+ (select variable_value - $start_insert_batches from information_schema.SESSION_STATUS
+ where variable_name ='Cassandra_row_insert_batches')
+ AS 'insert_batches';
+--enable_query_log
+
+delete from t2;
+drop table t2;
+drop table t0;
+
+--echo #
+--echo # UUID datatype support
+--echo #
+#create columnfamily cf5 (rowkey bigint primary key, uuidcol uuid);
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, uuidcol char(36)) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf5';
+delete from t2;
+
+insert into t2 values(1,'9b5658dc-f32f-11e1-94cd-f46d046e9f09');
+
+--error ER_WARN_DATA_OUT_OF_RANGE
+insert into t2 values(2,'not-an-uuid');
+
+--error ER_WARN_DATA_OUT_OF_RANGE
+insert into t2 values(3,'9b5658dc-f32f-11e1=94cd-f46d046e9f09');
+
+--error ER_WARN_DATA_OUT_OF_RANGE
+insert into t2 values(4,'9b5658dc-fzzf-11e1-94cd-f46d046e9f09');
+
+--error ER_WARN_DATA_OUT_OF_RANGE
+insert into t2 values
+ (5,'9b5658dc-f11f-11e1-94cd-f46d046e9f09'),
+ (6,'9b5658dc-f11f011e1-94cd-f46d046e9f09');
+
+select * from t2;
+
+delete from t2;
+drop table t2;
+
+# create columnfamily cf6 (rowkey uuid primary key, col1 int);
+CREATE TABLE t2 (rowkey char(36) PRIMARY KEY, col1 int) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf6';
+delete from t2;
+
+insert into t2 values('9b5658dc-f32f-11e1-94cd-f46d046e9f09', 1234);
+
+--error ER_WARN_DATA_OUT_OF_RANGE
+insert into t2 values('not-an-uuid', 563);
+
+select * from t2;
+delete from t2;
+drop table t2;
+
+
+--echo #
+--echo # boolean datatype support
+--echo #
+# create columnfamily cf7 (rowkey int primary key, boolcol boolean);
+CREATE TABLE t2 (rowkey int PRIMARY KEY, boolcol bool) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf7';
+insert into t2 values (0, 0);
+insert into t2 values (1, 1);
+select * from t2;
+delete from t2;
+drop table t2;
+
+
+--echo #
+--echo # Counter datatype support (read-only)
+--echo #
+# create columnfamily cf8 (rowkey int primary key, countercol counter);
+CREATE TABLE t2 (rowkey varchar(32) PRIMARY KEY, countercol bigint) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf8';
+select * from t2;
+drop table t2;
+
+--echo #
+--echo # Check that @@cassandra_default_thrift_host works
+--echo #
+show variables like 'cassandra_default_thrift_host';
+set @tmp=@@cassandra_default_thrift_host;
+--error ER_GLOBAL_VARIABLE
+set cassandra_default_thrift_host='localhost';
+set global cassandra_default_thrift_host='localhost';
+
+--echo # Try creating a table without specifying thrift_host:
+CREATE TABLE t2 (rowkey varchar(32) PRIMARY KEY, countercol bigint) ENGINE=CASSANDRA
+ keyspace='mariadbtest2' column_family = 'cf8';
+select * from t2;
+drop table t2;
+
+set global cassandra_default_thrift_host=@tmp;
+
+--echo #
+--echo # Consistency settings
+--echo #
+show variables like 'cassandra_%consistency';
+set @tmp=@@cassandra_write_consistency;
+
+--echo # Unfortunately, there is no easy way to check whether the setting takes effect.
+set cassandra_write_consistency='ONE';
+set cassandra_write_consistency='QUORUM';
+set cassandra_write_consistency='LOCAL_QUORUM';
+set cassandra_write_consistency='EACH_QUORUM';
+set cassandra_write_consistency='ALL';
+set cassandra_write_consistency='ANY';
+set cassandra_write_consistency='TWO';
+set cassandra_write_consistency='THREE';
+
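+# A possible negative check, kept disabled (hypothetical: assumes an invalid
+# level would be rejected with ER_WRONG_VALUE_FOR_VAR, which is unverified):
+#--error ER_WRONG_VALUE_FOR_VAR
+#set cassandra_write_consistency='NOT_A_LEVEL';
+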
+set cassandra_write_consistency=@tmp;
+
+--echo #
+--echo # varint datatype support
+--echo #
+# create columnfamily cf9 (rowkey varchar primary key, varint_col varint);
+CREATE TABLE t2 (rowkey varchar(32) PRIMARY KEY, varint_col varbinary(32)) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf9';
+--sorted_result
+select rowkey, hex(varint_col) from t2;
+drop table t2;
+
+--echo # Now, let's check what happens when MariaDB's column is not wide enough:
+CREATE TABLE t2 (rowkey varchar(32) PRIMARY KEY, varint_col varbinary(2)) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf9';
+--sorted_result
+--error ER_INTERNAL_ERROR
+select rowkey, hex(varint_col) from t2;
+drop table t2;
+
+--echo #
+--echo # Decimal datatype support
+--echo #
+CREATE TABLE t2 (rowkey varchar(32) PRIMARY KEY, decimal_col varbinary(32)) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf11';
+select rowkey, hex(decimal_col) from t2;
+drop table t2;
+
+--echo #
+--echo # Mapping TIMESTAMP -> int64
+--echo #
+set @save_tz= @@time_zone;
+set time_zone='UTC';
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, datecol timestamp) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf4';
+insert into t2 values (1, '2012-08-29 01:23:45');
+INSERT INTO t2 VALUES (10,'2012-08-29 01:23:46');
+drop table t2;
+
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, datecol bigint) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf4';
+select * from t2;
+delete from t2;
+drop table t2;
+set time_zone=@save_tz;
+
+--echo #
+--echo # Check whether changing parameters with ALTER TABLE works.
+--echo #
+CREATE TABLE t2 (rowkey varchar(32) PRIMARY KEY, decimal_col varbinary(32)) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf11';
+
+#--error ER_INTERNAL_ERROR
+#alter table t2 column_family='cf9';
+
+drop table t2;
+
+CREATE TABLE t2 (rowkey varchar(32) PRIMARY KEY, decimal_col varbinary(32)) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf11';
+
+let $c1= `select variable_value from information_schema.global_status
+ where variable_name like 'cassandra_row_inserts'`;
+alter table t2 column_family='cf12';
+let $c2= `select variable_value from information_schema.global_status
+ where variable_name like 'cassandra_row_inserts'`;
+
+--disable_query_log
+eval select ($c2 - $c1) as 'Writes made during ALTER TABLE';
+--enable_query_log
+
+drop table t2;
+
+--echo #
+--echo # UPDATE command support
+--echo #
+create table t1 (pk varchar(36) primary key, data1 varchar(60), data2 bigint) engine=cassandra
+ thrift_host='localhost' keyspace='mariadbtest2' column_family='cf1';
+
+insert into t1 values ('rowkey10', 'data1-value', 123456);
+insert into t1 values ('rowkey11', 'data1-value2', 34543);
+insert into t1 values ('rowkey12', 'data1-value3', 454);
+select * from t1;
+
+update t1 set data1='updated-1' where pk='rowkey11';
+select * from t1;
+update t1 set pk='new-rowkey12' where pk='rowkey12';
+select * from t1;
+
+delete from t1;
+drop table t1;
+
+--echo #
+--echo # Dynamic columns support
+--echo #
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, uuidcol blob DYNAMIC_COLUMN_STORAGE=1) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf5';
+drop table t2;
+
+--echo #error: dynamic column is not a blob
+--error ER_WRONG_FIELD_SPEC
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, uuidcol char(36) DYNAMIC_COLUMN_STORAGE=1) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf5';
+
+--echo #error: double dynamic column
+--error ER_WRONG_FIELD_SPEC
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, uuidcol blob DYNAMIC_COLUMN_STORAGE=1, textcol blob DYNAMIC_COLUMN_STORAGE=1) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf5';
+
+--echo #
+--echo # Dynamic column read
+--echo #
+#prepare data
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, uuidcol char(36)) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf5';
+delete from t2;
+insert into t2 values(1,'9b5658dc-f32f-11e1-94cd-f46d046e9f09');
+insert into t2 values(2,'9b5658dc-f32f-11e1-94cd-f46d046e9f0a');
+drop table t2;
+
+#test dynamic column read
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf5';
+select rowkey, column_list(dyn), column_get(dyn, 'uuidcol' as char) from t2;
+drop table t2;
+
+#cleanup data
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, uuidcol char(36)) ENGINE=CASSANDRA
+ thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf5';
+delete from t2;
+drop table t2;
+
+--echo #
+--echo # Dynamic column insert
+--echo #
+CREATE TABLE t2 (rowkey bigint PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf5';
+insert into t2 values (1, column_create("dyn1", 1, "dyn2", "two"));
+select rowkey, column_json(dyn) from t2;
+delete from t2;
+drop table t2;
+--echo # bigint
+CREATE TABLE t1 (rowkey bigint PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf2';
+insert into t1 values (1, column_create("dyn1", 1, "dyn2", "two", 'a', 254324));
+insert into t1 values (2, column_create("dyn1", 1, "dyn2", "two", 'a', 2543));
+select rowkey, column_json(dyn) from t1;
+delete from t1;
+drop table t1;
+--echo # int
+CREATE TABLE t1 (rowkey bigint PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf3';
+insert into t1 values (1, column_create("dyn1", 1, "dyn2", "two", 'intcol', 254324));
+insert into t1 values (2, column_create("dyn1", 1, "dyn2", "two", 'intcol', 2543));
+select rowkey, column_json(dyn) from t1;
+delete from t1;
+drop table t1;
+--echo # timestamp
+CREATE TABLE t1 (rowkey bigint PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf4';
+insert into t1 values (1, column_create("dyn1", 1, "dyn2", "two", 'datecol', 254324));
+insert into t1 values (2, column_create("dyn1", 1, "dyn2", "two", 'datecol', 2543));
+select rowkey, column_json(dyn) from t1;
+delete from t1;
+drop table t1;
+--echo # boolean
+CREATE TABLE t1 (rowkey int PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cf7';
+insert into t1 values (1, column_create("dyn1", 1, "dyn2", "two", 'boolcol', 254324));
+insert into t1 values (2, column_create("dyn1", 1, "dyn2", "two", 'boolcol', 0));
+select rowkey, column_json(dyn) from t1;
+select rowkey, column_json(dyn) from t1;
+update t1 set dyn=column_add(dyn, "dyn2", null, "dyn3", "3");
+select rowkey, column_json(dyn) from t1;
+update t1 set dyn=column_add(dyn, "dyn1", null) where rowkey= 1;
+select rowkey, column_json(dyn) from t1;
+update t1 set dyn=column_add(dyn, "dyn3", null, "a", "ddd");
+select rowkey, column_json(dyn) from t1;
+update t1 set dyn=column_add(dyn, "12345678901234", "ddd");
+select rowkey, column_json(dyn) from t1;
+update t1 set dyn=column_add(dyn, "12345678901234", null);
+select rowkey, column_json(dyn) from t1;
+update t1 set dyn=column_add(dyn, 'boolcol', null) where rowkey= 2;
+select rowkey, column_json(dyn) from t1;
+update t1 set rowkey= 3, dyn=column_add(dyn, "dyn1", null, 'boolcol', 0) where rowkey= 2;
+select rowkey, column_json(dyn) from t1;
+delete from t1;
+drop table t1;
+
+CREATE TABLE t1 (rowkey varchar(10) PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes) ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cfd1';
+--error ER_INTERNAL_ERROR
+select * from t1;
+drop table t1;
+
+# MDEV-560
+CREATE TABLE t1 (rowkey int PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes)
+ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cfd2';
+DELETE FROM t1;
+insert into t1 values (1, column_create("dyn", 1));
+select rowkey, column_list(dyn) from t1;
+# Cleanup
+delete from t1;
+DROP TABLE t1;
+
+# MDEV-561 (incorrectly formatted data passed to a dynamic column)
+CREATE TABLE t1 (rowkey int PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes)
+ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cfd2';
+--error ER_DYN_COL_WRONG_FORMAT
+insert into t1 values (1,'9b5658dc-f32f-11e1-94cd-f46d046e9f0a');
+delete from t1;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-565: Server crashes in ha_cassandra::write_row on
+--echo # inserting NULL into a dynamic column
+--echo #
+CREATE TABLE t1 (rowkey int PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes)
+ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cfd2';
+insert into t1 values (1, NULL);
+delete from t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Strange side effect of Cassandra: removing all columns of a row
+--echo # removes the whole row.
+--echo #
+CREATE TABLE t1 (rowkey int PRIMARY KEY, dyn blob DYNAMIC_COLUMN_STORAGE=yes)
+ENGINE=CASSANDRA thrift_host='localhost' keyspace='mariadbtest2' column_family = 'cfd2';
+INSERT INTO t1 VALUES(2,column_create("ab","ab"));
+select rowkey, column_json(dyn) from t1;
+UPDATE t1 set dyn=NULL;
+select rowkey, column_json(dyn) from t1;
+INSERT INTO t1 VALUES(2,column_create("ab","ab"));
+select rowkey, column_json(dyn) from t1;
+UPDATE t1 set dyn="";
+select rowkey, column_json(dyn) from t1;
+delete from t1;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-4005: Server crashes on creating a Cassandra table
+--echo # with a mix of static and dynamic columns
+--echo #
+--disable_warnings
+DROP TABLE IF EXISTS t1, t2;
+--enable_warnings
+
+--remove_files_wildcard $MYSQLTEST_VARDIR cassandra_test_cleanup.cql
+--write_file $MYSQLTEST_VARDIR/cassandra_test_cleanup.cql
+drop keyspace bug;
+EOF
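+# Exit codes 1 and 2 from cqlsh are tolerated so cleanup also succeeds when
+# the 'bug' keyspace does not exist yet.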
+--error 0,1,2
+--system cqlsh -3 -f $MYSQLTEST_VARDIR/cassandra_test_cleanup.cql
+
+--remove_files_wildcard $MYSQLTEST_VARDIR cassandra_test_init.cql
+--write_file $MYSQLTEST_VARDIR/cassandra_test_init.cql
+
+CREATE KEYSPACE bug
+ WITH strategy_class = 'org.apache.cassandra.locator.SimpleStrategy'
+ AND strategy_options:replication_factor='1';
+
+USE bug;
+create columnfamily cf1 ( pk int primary key, col_int int, a bigint );
+EOF
+
+--system cqlsh -3 -f $MYSQLTEST_VARDIR/cassandra_test_init.cql
+
+
+CREATE TABLE t1 (
+ pk int primary key,
+ col_int int,
+ dyncol blob DYNAMIC_COLUMN_STORAGE=yes
+) ENGINE=cassandra keyspace='bug' thrift_host = '127.0.0.1' column_family='cf1';
+
+drop table t1;
+
+############################################################################
+## Cassandra cleanup
+############################################################################
+--disable_parsing
+drop columnfamily cf1;
+drop columnfamily cf2;
+drop columnfamily cf3;
+drop columnfamily cf4;
+drop columnfamily cf5;
+drop columnfamily cf6;
+drop columnfamily cf7;
+--enable_parsing
+############################################################################
+## Cassandra cleanup ends
+############################################################################
+
diff --git a/mysql-test/t/comments.test b/mysql-test/t/comments.test
index 08c74c99d0c..2ae6e5a4e38 100644
--- a/mysql-test/t/comments.test
+++ b/mysql-test/t/comments.test
@@ -8,7 +8,7 @@ multi line comment */;
--error 1065
;
select 1 /*!32301 +1 */;
-select 1 /*!52301 +1 */;
+select 1 /*!952301 +1 */;
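+# Version comments now carry 6-digit server versions (MariaDB 10.0+ maps to
+# 100000+), so the 6-digit number above exceeds the server version and the
+# comment body is ignored.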
select 1--1;
# Note that the following returns 4 while it should return 2
# This is because the mysqld server doesn't parse -- comments
@@ -27,7 +27,7 @@ select 1 # The rest of the row will be ignored
select 1 /*M! +1 */;
select 1 /*M!50000 +1 */;
select 1 /*M!50300 +1 */;
-select 2 /*M!99999 +1 */;
+select 2 /*M!999999 +1 */;
--error ER_PARSE_ERROR
select 2 /*M!0000 +1 */;
@@ -39,11 +39,11 @@ select 2 /*M!0000 +1 */;
select 1/*!2*/;
--error ER_PARSE_ERROR
-select 1/*!000002*/;
+select 1/*!0000002*/;
select 1/*!999992*/;
-select 1 + /*!00000 2 */ + 3 /*!99999 noise*/ + 4;
+select 1 + /*!00000 2 */ + 3 /*!999999 noise*/ + 4;
#
# Bug#28779 (mysql_query() allows execution of statements with unbalanced
@@ -69,10 +69,10 @@ prepare bar from "DELETE FROM table_28779 WHERE a = 7 OR 1=1/*! AND 2=2;";
prepare bar from "DELETE FROM table_28779 WHERE a = 7 OR 1=1/*! AND 2=2;*";
--error 1064
-prepare bar from "DELETE FROM table_28779 WHERE a = 7 OR 1=1/*!98765' AND b = 'bar';";
+prepare bar from "DELETE FROM table_28779 WHERE a = 7 OR 1=1/*!998765' AND b = 'bar';";
--error 1064
-prepare bar from "DELETE FROM table_28779 WHERE a = 7 OR 1=1/*!98765' AND b = 'bar';*";
+prepare bar from "DELETE FROM table_28779 WHERE a = 7 OR 1=1/*!998765' AND b = 'bar';*";
drop table table_28779;
diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test
index 1fabb49138c..a61f89539cb 100644
--- a/mysql-test/t/create.test
+++ b/mysql-test/t/create.test
@@ -55,10 +55,10 @@ create table a (`aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
#
# Some wrong defaults, so these creates should fail too (Bug #5902)
#
---error 1067
create table t1 (a datetime default now());
---error 1294
+drop table t1;
create table t1 (a datetime on update now());
+drop table t1;
--error 1067
create table t1 (a int default 100 auto_increment);
--error 1067
diff --git a/mysql-test/t/dyncol.test b/mysql-test/t/dyncol.test
index 66e308540f4..de30cac610a 100644
--- a/mysql-test/t/dyncol.test
+++ b/mysql-test/t/dyncol.test
@@ -550,3 +550,164 @@ select hex(COLUMN_CREATE(0, COLUMN_GET(@a, 9 AS DECIMAL(19,0))));
select hex(COLUMN_CREATE(0, COLUMN_GET(COLUMN_CREATE(0, 0.0 as decimal), 0 as decimal)));
select hex(COLUMN_CREATE(0, 0.0 as decimal));
+
+--echo #
+--echo # test of symbolic names
+--echo #
+--echo # creation test (names)
+set names utf8;
+select hex(column_create("адын", 1212));
+select hex(column_create("1212", 1212));
+select hex(column_create(1212, 2, "www", 3));
+select hex(column_create("1212", 2, "www", 3));
+select hex(column_create("1212", 2, 3, 3));
+select hex(column_create("1212", 2, "адын", 1, 3, 3));
+set names default;
+
+--echo # fetching column test (names)
+set names utf8;
+select column_get(column_create("адын", 1212), "адын" as int);
+select column_get(column_create("1212", 2, "адын", 1, 3, 3), "адын" as int);
+select column_get(column_create("1212", 2, "адын", 1, 3, 3), 1212 as int);
+select column_get(column_create("1212", 2, "адын", 1, 3, 3), "3" as int);
+select column_get(column_create("1212", 2, "адын", 1, 3, 3), 3 as int);
+select column_get(column_create("1212", 2, "адын", 1, 3, 3), 4 as int);
+select column_get(column_create("1212", 2, "адын", 1, 3, 3), "4" as int);
+set names default;
+
+--echo # column existence test (names)
+set names utf8;
+select column_exists(column_create("адын", 1212), "адын");
+select column_exists(column_create("адын", 1212), "aады");
+select column_exists(column_create("1212", 2, "адын", 1, 3, 3), "адын");
+select column_exists(column_create("1212", 2, "адын", 1, 3, 3), 1212);
+select column_exists(column_create("1212", 2, "адын", 1, 3, 3), "3");
+select column_exists(column_create("1212", 2, "адын", 1, 3, 3), 3);
+select column_exists(column_create("1212", 2, "адын", 1, 3, 3), 4);
+select column_exists(column_create("1212", 2, "адын", 1, 3, 3), "4");
+set names default;
+
+--echo # column changing test (names)
+select hex(column_add(column_create(1, "AAA"), "b", "BBB"));
+select hex(column_add(column_create("1", "AAA"), "b", "BBB"));
+select column_get(column_add(column_create(1, "AAA"), "b", "BBB"), 1 as char);
+select column_get(column_add(column_create(1, "AAA"), "b", "BBB"), "b" as char);
+select hex(column_add(column_create("a", "AAA"), 1, "BBB"));
+select hex(column_add(column_create("a", "AAA"), "1", "BBB"));
+select hex(column_add(column_create("a", 1212 as integer), "b", "1212" as integer));
+select hex(column_add(column_create("a", 1212 as integer), "a", "1212" as integer));
+select hex(column_add(column_create("a", 1212 as integer), "a", NULL as integer));
+select hex(column_add(column_create("a", 1212 as integer), "b", NULL as integer));
+select hex(column_add(column_create("a", 1212 as integer), "b", 1212 as integer, "a", 11 as integer));
+select column_get(column_add(column_create("a", 1212 as integer), "b", 1212 as integer, "a", 11 as integer), "a" as integer);
+select column_get(column_add(column_create("a", 1212 as integer), "b", 1212 as integer, "a", 11 as integer), "b" as integer);
+select hex(column_add(column_create("a", 1212 as integer), "a", 1212 as integer, "b", 11 as integer));
+select hex(column_add(column_create("a", NULL as integer), "a", 1212 as integer, "b", 11 as integer));
+select hex(column_add(column_create("a", 1212 as integer, "b", 1212 as integer), "a", 11 as integer));
+select hex(column_add(column_create("a", 1), "a", null));
+select column_list(column_add(column_create("a", 1), "a", null));
+select column_list(column_add(column_create("a", 1), "a", ""));
+select hex(column_add("", "a", 1));
+
+--echo # column delete (names)
+select hex(column_delete(column_create("a", 1212 as integer, "b", 1212 as integer), "a"));
+select hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "b"));
+select hex(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer));
+select hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "c"));
+select hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "d"));
+select hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "b", "a"));
+select hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "b", "c"));
+select hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "a", "b", "c"));
+select hex(column_delete(column_create("a", 1 as integer, "b", 2 as integer, "c", 3 as integer), "a", "b", "c", "e"));
+select hex(column_delete(column_create("a", 1), "a"));
+select hex(column_delete("", "a"));
+
+--echo #
+--echo # MDEV-458 DNAMES: Server crashes on using an unquoted string
+--echo # as a dynamic column name
+--echo #
+--error ER_BAD_FIELD_ERROR
+select COLUMN_CREATE(color, "black");
+
+--echo #
+--echo # MDEV-489 Assertion `offset < 0x1f' failed in
+--echo # type_and_offset_store on COLUMN_ADD
+--echo #
+CREATE TABLE t1 (f1 tinyblob);
+
+INSERT INTO t1 VALUES (COLUMN_CREATE('col1', REPEAT('a',30)));
+select column_check(f1) from t1;
+UPDATE t1 SET f1 = COLUMN_ADD( f1, REPEAT('b',211), 'val2' );
+# We cannot detect a truncated last string with 100% certainty,
+# because truncation is only detected via the string's end.
+select column_check(f1) from t1;
+UPDATE t1 SET f1 = COLUMN_ADD( f1, REPEAT('c',211), 'val3' );
+select column_check(f1) from t1;
+
+drop table t1;
+
+--echo #
+--echo # MDEV-490/MDEV-491 null as arguments
+--echo #
+SELECT COLUMN_GET( COLUMN_CREATE( 'col', 'val' ), NULL AS CHAR );
+SELECT COLUMN_GET( NULL, 'col' as char );
+SELECT COLUMN_EXISTS( COLUMN_CREATE( 'col', 'val' ), NULL);
+SELECT COLUMN_EXISTS( NULL, 'col');
+SELECT COLUMN_CREATE( NULL, 'val' );
+SELECT COLUMN_ADD( NULL, 'val', 'col');
+
+--echo #
+--echo # MDEV-488: Assertion `column_name->length < 255' failed on a
+--echo # column name with length 255 (precisely)
+--echo #
+SELECT hex(COLUMN_CREATE(REPEAT('a',255),1));
+--error ER_DYN_COL_DATA
+SELECT hex(COLUMN_CREATE(REPEAT('a',65536),1));
+
+--echo #
+--echo # JSON conversion
+--echo #
+select column_json(column_create("int", -1212 as int, "uint", 12334 as unsigned int, "decimal", "23.344" as decimal, "double", 1.23444e50 as double, "string", 'gdgd\\dhdjh"dhdhd' as char, "time", "0:45:49.000001" AS time, "datetime", "2011-04-05 0:45:49.000001" AS datetime, "date", "2011-04-05" AS date));
+select column_json(column_create(1, -1212 as int, 2, 12334 as unsigned int, 3, "23.344" as decimal, 4, 1.23444e50 as double, 5, 'gdgd\\dhdjh"dhdhd' as char, 6, "0:45:49.000001" AS time, 7, "2011-04-05 0:45:49.000001" AS datetime, 8, "2011-04-05" AS date));
+
+--echo #
+--echo # CHECK test
+--echo #
+SELECT COLUMN_CHECK(COLUMN_CREATE(1,'a'));
+SELECT COLUMN_CHECK('abracadabra');
+SELECT COLUMN_CHECK('');
+SELECT COLUMN_CHECK(NULL);
+
+--echo #
+--echo # escaping check
+--echo #
+select column_json(column_create("string", "'\"/\\`.,whatever")),hex(column_create("string", "'\"/\\`.,whatever"));
+
+--echo #
+--echo # embedding test
+--echo #
+select column_json(column_create("val", "val", "emb", column_create("val2", "val2")));
+select column_json(column_create(1, "val", 2, column_create(3, "val2")));
+
+--echo #
+--echo # Time encoding
+--echo #
+select hex(column_create("t", "800:46:06.23434" AS time)) as hex,
+ column_json(column_create("t", "800:46:06.23434" AS time)) as json;
+select hex(column_create(1, "800:46:06.23434" AS time)) as hex,
+ column_json(column_create(1, "800:46:06.23434" AS time)) as json;
+
+select hex(column_create("t", "800:46:06" AS time)) as hex,
+ column_json(column_create("t", "800:46:06" AS time)) as json;
+select hex(column_create(1, "800:46:06" AS time)) as hex,
+ column_json(column_create(1, "800:46:06" AS time)) as json;
+
+select hex(column_create("t", "2012-12-21 10:46:06.23434" AS datetime)) as hex,
+ column_json(column_create("t", "2012-12-21 10:46:06.23434" AS datetime)) as json;
+select hex(column_create(1, "2012-12-21 10:46:06.23434" AS datetime)) as hex,
+ column_json(column_create(1, "2012-12-21 10:46:06.23434" AS datetime)) as json;
+
+select hex(column_create("t", "2012-12-21 10:46:06" AS datetime)) as hex,
+ column_json(column_create("t", "2012-12-21 10:46:06" AS datetime)) as json;
+select hex(column_create(1, "2012-12-21 10:46:06" AS datetime)) as hex,
+ column_json(column_create(1, "2012-12-21 10:46:06" AS datetime)) as json;
diff --git a/mysql-test/t/file_contents.test b/mysql-test/t/file_contents.test
index 87c1b76f15c..d54a60e62a3 100644
--- a/mysql-test/t/file_contents.test
+++ b/mysql-test/t/file_contents.test
@@ -42,11 +42,11 @@ if ($dir_bin eq '/usr/') {
$dir_docs = "$dir_docs/Docs"; # development tree
}
}
-$found_version = "No line 'MySQL source #.#.#'";
-$found_revision = "No line 'revision-id: .....'";
+$found_version = "No line 'MySQL source #.#.#' in $dir_docs/INFO_SRC";
+$found_revision = "No line 'revision-id: .....' in $dir_docs/INFO_SRC";
open(I_SRC,"<","$dir_docs/INFO_SRC") or print "Cannot open 'INFO_SRC' in '$dir_docs' (starting from bindir '$dir_bin')\n";
while(defined ($line = <I_SRC>)) {
- if ($line =~ m|^MySQL source \d\.\d\.\d+|) {$found_version = "Found MySQL version number";}
+ if ($line =~ m|^MySQL source \d+\.\d\.\d+|) {$found_version = "Found MySQL version number";}
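+  # Accept multi-digit major versions, e.g. "MySQL source 10.0.1".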
if ($line =~ m|^revision-id: .*@.*-2\d{13}-\w+$|) {$found_revision = "Found BZR revision id";}
}
close I_SRC;
diff --git a/mysql-test/t/filesort_debug.test b/mysql-test/t/filesort_debug.test
index f89d46a7bd0..c375334ad29 100644
--- a/mysql-test/t/filesort_debug.test
+++ b/mysql-test/t/filesort_debug.test
@@ -11,7 +11,7 @@ SET @old_debug= @@session.debug;
CREATE TABLE t1(f0 int auto_increment primary key, f1 int);
INSERT INTO t1(f1) VALUES (0),(1),(2),(3),(4),(5);
-SET session debug_dbug= '+d,make_sort_keys_alloc_fail';
+SET session debug_dbug= '+d,alloc_sort_buffer_fail';
CALL mtr.add_suppression("Out of sort memory");
--error ER_OUT_OF_SORTMEMORY
SELECT * FROM t1 ORDER BY f1 ASC, f0;
diff --git a/mysql-test/t/flush.test b/mysql-test/t/flush.test
index 037e2fcaa73..a1df9359d30 100644
--- a/mysql-test/t/flush.test
+++ b/mysql-test/t/flush.test
@@ -701,3 +701,11 @@ disconnect con1;
connection default;
COMMIT;
DROP TABLE t1;
+
+--echo #
+--echo # Test flushing slave or relay logs twice
+--echo #
+--error ER_WRONG_USAGE
+flush relay logs,relay logs;
+--error ER_WRONG_USAGE
+flush slave,slave;
diff --git a/mysql-test/t/func_encrypt_nossl.test b/mysql-test/t/func_encrypt_nossl.test
index 11866db1da7..2dafaa671c4 100644
--- a/mysql-test/t/func_encrypt_nossl.test
+++ b/mysql-test/t/func_encrypt_nossl.test
@@ -1,4 +1,4 @@
--- source include/not_openssl.inc
+-- source include/not_ssl.inc
#
# Test output from des_encrypt and des_decrypt when server is
diff --git a/mysql-test/t/func_system.test b/mysql-test/t/func_system.test
index a5041a83623..fa09e81a300 100644
--- a/mysql-test/t/func_system.test
+++ b/mysql-test/t/func_system.test
@@ -13,9 +13,9 @@ select user() like _utf8"%@%";
select user() like _latin1"%@%";
select charset(user());
-select version()>="3.23.29";
-select version()>=_utf8"3.23.29";
-select version()>=_latin1"3.23.29";
+select version()>="03.23.29";
+select version()>=_utf8"03.23.29";
+select version()>=_latin1"03.23.29";
select charset(version());
explain extended select database(), user();
diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test
index f60f5e5ced7..57ef68595bf 100644
--- a/mysql-test/t/func_time.test
+++ b/mysql-test/t/func_time.test
@@ -526,7 +526,7 @@ SELECT MAKETIME(0, 0, 4294967296);
SELECT MAKETIME(CAST(-1 AS UNSIGNED), 0, 0);
# check if EXTRACT() handles out-of-range values correctly
-SELECT EXTRACT(HOUR FROM '100000:02:03');
+SELECT EXTRACT(HOUR FROM '10000:02:03');
# check if we get proper warnings if both input string truncation
# and out-of-range value occur
diff --git a/mysql-test/t/function_defaults.test b/mysql-test/t/function_defaults.test
new file mode 100644
index 00000000000..dd29b4609cb
--- /dev/null
+++ b/mysql-test/t/function_defaults.test
@@ -0,0 +1,23 @@
+--echo #
+--echo # Test of function defaults for any server, including embedded.
+--echo #
+
+--source include/have_innodb.inc
+
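+# The sourced include is parameterized through the mtr variables set before
+# each run, so the same checks execute once per temporal type flavour.
+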
+--echo #
+--echo # Function defaults run 1. No microsecond precision.
+--echo #
+let $current_timestamp=CURRENT_TIMESTAMP;
+let $now=NOW();
+let $timestamp=TIMESTAMP;
+let $datetime=DATETIME;
+source 'include/function_defaults.inc';
+
+--echo #
+--echo # Function defaults run 2. Six digits of fractional seconds precision.
+--echo #
+let $current_timestamp=CURRENT_TIMESTAMP(6);
+let $now=NOW(6);
+let $timestamp=TIMESTAMP(6);
+let $datetime=DATETIME(6);
+source 'include/function_defaults.inc';
diff --git a/mysql-test/t/function_defaults_notembedded.test b/mysql-test/t/function_defaults_notembedded.test
new file mode 100644
index 00000000000..3d686c4b272
--- /dev/null
+++ b/mysql-test/t/function_defaults_notembedded.test
@@ -0,0 +1,18 @@
+--echo #
+--echo # Test of function defaults for non-embedded server.
+--echo #
+
+--source include/not_embedded.inc
+--source include/have_debug_sync.inc
+
+--echo #
+--echo # Function defaults run 1. No microsecond precision.
+--echo #
+let $timestamp=TIMESTAMP;
+--source include/function_defaults_notembedded.inc
+
+--echo #
+--echo # Function defaults run 2. Six digits of fractional seconds precision.
+--echo #
+let $timestamp=TIMESTAMP(6);
+--source include/function_defaults_notembedded.inc
diff --git a/mysql-test/t/group_by.test b/mysql-test/t/group_by.test
index a90fcba77fc..3c6f997f28e 100644
--- a/mysql-test/t/group_by.test
+++ b/mysql-test/t/group_by.test
@@ -1585,3 +1585,52 @@ EXECUTE stmt;
EXECUTE stmt;
DROP TABLE t1;
+
+--echo #
+--echo # Bug #58782
+--echo # Missing rows with SELECT .. WHERE .. IN subquery
+--echo # with full GROUP BY and no aggr
+--echo #
+
+CREATE TABLE t1 (
+ pk INT NOT NULL,
+ col_int_nokey INT,
+ PRIMARY KEY (pk)
+);
+
+INSERT INTO t1 VALUES (10,7);
+INSERT INTO t1 VALUES (11,1);
+INSERT INTO t1 VALUES (12,5);
+INSERT INTO t1 VALUES (13,3);
+
+## original query:
+
+SELECT pk AS field1, col_int_nokey AS field2
+FROM t1
+WHERE col_int_nokey > 0
+GROUP BY field1, field2;
+
+## store query results in a new table:
+
+CREATE TABLE where_subselect
+ SELECT pk AS field1, col_int_nokey AS field2
+ FROM t1
+ WHERE col_int_nokey > 0
+ GROUP BY field1, field2
+;
+
+## query the new table and compare to original using WHERE ... IN():
+
+SELECT *
+FROM where_subselect
+WHERE (field1, field2) IN (
+ SELECT pk AS field1, col_int_nokey AS field2
+ FROM t1
+ WHERE col_int_nokey > 0
+ GROUP BY field1, field2
+);
+
+DROP TABLE t1;
+DROP TABLE where_subselect;
+
+--echo # End of Bug #58782
diff --git a/mysql-test/t/mdev-504.test b/mysql-test/t/mdev-504.test
new file mode 100644
index 00000000000..bc38e99067a
--- /dev/null
+++ b/mysql-test/t/mdev-504.test
@@ -0,0 +1,77 @@
+--disable_ps_protocol
+
+CREATE TABLE A (
+ pk INTEGER AUTO_INCREMENT PRIMARY KEY,
+ fdate DATE
+) ENGINE=MyISAM;
+
+--delimiter |
+
+CREATE PROCEDURE p_analyze()
+BEGIN
+ DECLARE attempts INTEGER DEFAULT 100;
+ wl_loop: WHILE attempts > 0 DO
+ ANALYZE TABLE A;
+ SET attempts = attempts - 1;
+ END WHILE wl_loop;
+END |
+
+CREATE FUNCTION rnd3() RETURNS INT
+BEGIN
+ RETURN ROUND(3 * RAND() + 0.5);
+END |
+
+--delimiter ;
+
+SET GLOBAL use_stat_tables = PREFERABLY;
+
+--let $trial = 100
+
+--disable_query_log
+--disable_result_log
+--disable_warnings
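+
+# Stress loop: two connections run ANALYZE TABLE concurrently while a third
+# repeatedly applies a random INSERT/DELETE/UPDATE to table A.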
+while ($trial)
+{
+
+ --connect (con1,localhost,root,,)
+ --send CALL p_analyze()
+
+ --connect (con2,localhost,root,,)
+ --send CALL p_analyze()
+
+ --let $run = 100
+
+ while ($run)
+ {
+ --connect (con3,localhost,root,,)
+
+ let $query = `SELECT CASE rnd3()
+ WHEN 1 THEN 'INSERT INTO A (pk) VALUES (NULL)'
+ WHEN 2 THEN 'DELETE FROM A LIMIT 1'
+ ELSE 'UPDATE A SET fdate = 2 LIMIT 1' END`;
+ --eval $query
+ --disconnect con3
+ --dec $run
+ }
+
+ --connection con2
+ --reap
+ --disconnect con2
+ --connection con1
+ --reap
+ --disconnect con1
+
+ --dec $trial
+}
+
+--enable_query_log
+--enable_result_log
+--enable_warnings
+
+# Cleanup
+--connection default
+DROP TABLE A;
+DROP PROCEDURE p_analyze;
+DROP FUNCTION rnd3;
+SET GLOBAL use_stat_tables = DEFAULT;
+
diff --git a/mysql-test/t/mysql.test b/mysql-test/t/mysql.test
index 76d980dee30..71a985654ef 100644
--- a/mysql-test/t/mysql.test
+++ b/mysql-test/t/mysql.test
@@ -223,19 +223,19 @@ drop table t17583;
--echo Test connect with dbname + _invalid_ hostname
# Mask the errno of the error message
---replace_regex /\([0-9]*\)/(errno)/
+--replace_regex /\([0-9|-]*\)/(errno)/
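+# The '-' in the bracket class lets the mask also cover negative errno values.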
--error 1
--exec $MYSQL test -e "\r test invalid_hostname" 2>&1
---replace_regex /\([0-9]*\)/(errno)/
+--replace_regex /\([0-9|-]*\)/(errno)/
--error 1
--exec $MYSQL test -e "connect test invalid_hostname" 2>&1
--echo The commands reported in the bug report
---replace_regex /\([0-9]*\)/(errno)/
+--replace_regex /\([0-9|-]*\)/(errno)/
--error 1
--exec $MYSQL test -e "\r\r\n\r\n cyril\ has\ found\ a\ bug\ :)XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" 2>&1
-#--replace_regex /\([0-9]*\)/(errno)/
+#--replace_regex /\([0-9|-]*\)/(errno)/
#--error 1
#--exec echo '\r\r\n\r\n cyril\ has\ found\ a\ bug\ :)XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' | $MYSQL 2>&1
@@ -244,7 +244,7 @@ drop table t17583;
--exec $MYSQL test -e "\r test_really_long_dbnamexxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx localhost" 2>&1
--echo Too long hostname
---replace_regex /\([0-9]*\)/(errno)/
+--replace_regex /\([0-9|-]*\)/(errno)/
--error 1
--exec $MYSQL test -e "\r test cyrils_superlonghostnameXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" 2>&1
@@ -360,7 +360,7 @@ remove_file $MYSQLTEST_VARDIR/tmp/bug31060.sql;
#
# Bug#37268 'binary' character set makes CLI-internal commands case sensitive
#
---replace_regex /\([0-9]*\)/(errno)/
+--replace_regex /\([0-9|-]*\)/(errno)/
--error 1
--exec $MYSQL --default-character-set=binary test -e "CONNECT test invalid_hostname" 2>&1
--exec $MYSQL --default-character-set=binary test -e "DELIMITER //" 2>&1
diff --git a/mysql-test/t/mysql_upgrade.test b/mysql-test/t/mysql_upgrade.test
index f200bce551f..78d439d5d52 100644
--- a/mysql-test/t/mysql_upgrade.test
+++ b/mysql-test/t/mysql_upgrade.test
@@ -47,7 +47,7 @@ DROP USER mysqltest1@'%';
--echo Run mysql_upgrade with a non existing server socket
--replace_result $MYSQLTEST_VARDIR var
---replace_regex /.*mysqlcheck.*: Got/mysqlcheck: Got/ /\([0-9]*\)/(errno)/
+--replace_regex /.*mysqlcheck.*: Got/mysqlcheck: Got/ /\([0-9|-]*\)/(errno)/
--error 1
--exec $MYSQL_UPGRADE --force --host=not_existing_host 2>&1
diff --git a/mysql-test/t/mysqldump-max.test b/mysql-test/t/mysqldump-max.test
index 27c1a3ce20c..d0a4870ba31 100644
--- a/mysql-test/t/mysqldump-max.test
+++ b/mysql-test/t/mysqldump-max.test
@@ -1194,7 +1194,7 @@ DROP TABLE t2;
--exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/mwl136.sql
--replace_regex /\/\* xid=.* \*\//\/* XID *\// /Server ver: .*, Binlog ver: .*/Server ver: #, Binlog ver: #/ /table_id: [0-9]+/table_id: #/
-SHOW BINLOG EVENTS LIMIT 6,3;
+SHOW BINLOG EVENTS LIMIT 7,3;
--perl
my $f= "$ENV{MYSQLTEST_VARDIR}/tmp/mwl136.sql";
open F, '<', $f or die "Failed to open $f: $!\n";
diff --git a/mysql-test/t/order_by.test b/mysql-test/t/order_by.test
index 3e20d22d726..d573c3b5b8a 100644
--- a/mysql-test/t/order_by.test
+++ b/mysql-test/t/order_by.test
@@ -865,6 +865,7 @@ CALL mtr.add_suppression("Out of sort memory");
--error ER_OUT_OF_SORTMEMORY
select * from t1 order by b;
drop table t1;
+set session sort_buffer_size= 30000;
--echo #
--echo # Bug #39844: Query Crash Mysql Server 5.0.67
@@ -1391,7 +1392,205 @@ ORDER BY t2.c LIMIT 5;
DROP TABLE t1,t2,t3;
-#
+--echo #
+--echo # WL#1393 - Optimizing filesort with small limit
+--echo #
+
+CREATE TABLE t1(f0 int auto_increment primary key, f1 int, f2 varchar(200));
+INSERT INTO t1(f1, f2) VALUES
+(0,"0"),(1,"1"),(2,"2"),(3,"3"),(4,"4"),(5,"5"),
+(6,"6"),(7,"7"),(8,"8"),(9,"9"),(10,"10"),
+(11,"11"),(12,"12"),(13,"13"),(14,"14"),(15,"15"),
+(16,"16"),(17,"17"),(18,"18"),(19,"19"),(20,"20"),
+(21,"21"),(22,"22"),(23,"23"),(24,"24"),(25,"25"),
+(26,"26"),(27,"27"),(28,"28"),(29,"29"),(30,"30"),
+(31,"31"),(32,"32"),(33,"33"),(34,"34"),(35,"35"),
+(36,"36"),(37,"37"),(38,"38"),(39,"39"),(40,"40"),
+(41,"41"),(42,"42"),(43,"43"),(44,"44"),(45,"45"),
+(46,"46"),(47,"47"),(48,"48"),(49,"49"),(50,"50"),
+(51,"51"),(52,"52"),(53,"53"),(54,"54"),(55,"55"),
+(56,"56"),(57,"57"),(58,"58"),(59,"59"),(60,"60"),
+(61,"61"),(62,"62"),(63,"63"),(64,"64"),(65,"65"),
+(66,"66"),(67,"67"),(68,"68"),(69,"69"),(70,"70"),
+(71,"71"),(72,"72"),(73,"73"),(74,"74"),(75,"75"),
+(76,"76"),(77,"77"),(78,"78"),(79,"79"),(80,"80"),
+(81,"81"),(82,"82"),(83,"83"),(84,"84"),(85,"85"),
+(86,"86"),(87,"87"),(88,"88"),(89,"89"),(90,"90"),
+(91,"91"),(92,"92"),(93,"93"),(94,"94"),(95,"95"),
+(96,"96"),(97,"97"),(98,"98"),(99,"99");
+
+################
+## Test sort when source data fits in memory
+
+SELECT * FROM t1 ORDER BY f1 ASC, f0 LIMIT 100;
+SELECT * FROM t1 ORDER BY f1 ASC, f0 LIMIT 30;
+SELECT * FROM t1 ORDER BY f1 ASC, f0 LIMIT 0;
+SELECT * FROM t1 ORDER BY f2 DESC, f0 LIMIT 30;
+SELECT * FROM t1 ORDER BY f2 DESC, f0 LIMIT 0;
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 20;
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 0;
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 10 OFFSET 10;
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 0 OFFSET 10;
+
+################
+## Test sort when source data does not fit in memory
+set sort_buffer_size= 32768;
+CREATE TEMPORARY TABLE tmp (f1 int, f2 varchar(20));
+INSERT INTO tmp SELECT f1, f2 FROM t1;
+INSERT INTO t1(f1, f2) SELECT * FROM tmp;
+INSERT INTO tmp SELECT f1, f2 FROM t1;
+INSERT INTO t1(f1, f2) SELECT * FROM tmp;
+
+SELECT * FROM t1 ORDER BY f1 ASC, f0 LIMIT 30;
+SELECT * FROM t1 ORDER BY f1 ASC, f0 LIMIT 0;
+SELECT * FROM t1 ORDER BY f2 DESC, f0 LIMIT 30;
+SELECT * FROM t1 ORDER BY f2 DESC, f0 LIMIT 0;
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 20;
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 0;
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 10 OFFSET 10;
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 0 OFFSET 10;
+
+################
+## Test with SQL_CALC_FOUND_ROWS
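+## FOUND_ROWS() must still report the pre-LIMIT row count even when the
+## sort is performed through the small-limit priority queue.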
+set sort_buffer_size= 32768;
+SELECT SQL_CALC_FOUND_ROWS * FROM t1
+ORDER BY f1, f0 LIMIT 30;
+SELECT FOUND_ROWS();
+
+SELECT SQL_CALC_FOUND_ROWS * FROM t1
+ORDER BY f1, f0 LIMIT 0;
+SELECT FOUND_ROWS();
+
+SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE f1>10
+ORDER BY f2, f0 LIMIT 20;
+SELECT FOUND_ROWS();
+
+SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE f1>10
+ORDER BY f2, f0 LIMIT 0;
+SELECT FOUND_ROWS();
+
+SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE f1>10
+ORDER BY f2, f0 LIMIT 10 OFFSET 10;
+SELECT FOUND_ROWS();
+
+SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE f1>10
+ORDER BY f2, f0 LIMIT 0 OFFSET 10;
+SELECT FOUND_ROWS();
+
+################
+## Test sorting with join
+## These are rewritten to use a priority queue (PQ) during execution.
+set sort_buffer_size= 327680;
+
+SELECT * FROM t1 JOIN tmp on t1.f2=tmp.f2
+ORDER BY tmp.f1, f0 LIMIT 30;
+
+SELECT * FROM t1 JOIN tmp on t1.f2=tmp.f2
+ORDER BY tmp.f1, f0 LIMIT 30 OFFSET 30;
+
+SELECT SQL_CALC_FOUND_ROWS * FROM t1 JOIN tmp on t1.f2=tmp.f2
+ORDER BY tmp.f1, f0 LIMIT 30 OFFSET 30;
+SELECT FOUND_ROWS();
+
+SELECT SQL_CALC_FOUND_ROWS * FROM t1 JOIN tmp on t1.f2=tmp.f2
+WHERE t1.f2>20
+ORDER BY tmp.f1, f0 LIMIT 30 OFFSET 30;
+SELECT FOUND_ROWS();
+
+################
+## Test views
+CREATE VIEW v1 as SELECT * FROM t1 ORDER BY f1, f0 LIMIT 30;
+SELECT * FROM v1;
+drop view v1;
+
+CREATE VIEW v1 as SELECT * FROM t1 ORDER BY f1, f0 LIMIT 100;
+SELECT * FROM v1 ORDER BY f2, f0 LIMIT 30;
+
+CREATE VIEW v2 as SELECT * FROM t1 ORDER BY f2, f0 LIMIT 100;
+SELECT * FROM v1 JOIN v2 on v1.f1=v2.f1 ORDER BY v1.f2,v1.f0,v2.f0
+LIMIT 30;
+
+################
+## Test group & having
+SELECT floor(f1/10) f3, count(f2) FROM t1
+GROUP BY 1 ORDER BY 2,1 LIMIT 5;
+
+SELECT floor(f1/10) f3, count(f2) FROM t1
+GROUP BY 1 ORDER BY 2,1 LIMIT 0;
+
+################
+## Test SP
+delimiter |;
+CREATE PROCEDURE wl1393_sp_test()
+BEGIN
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 30;
+SELECT * FROM t1 WHERE f1>10 ORDER BY f2, f0 LIMIT 15 OFFSET 15;
+SELECT SQL_CALC_FOUND_ROWS * FROM t1 WHERE f1>10
+ORDER BY f2, f0 LIMIT 15 OFFSET 15;
+SELECT FOUND_ROWS();
+SELECT * FROM v1 ORDER BY f2, f0 LIMIT 30;
+END|
+CALL wl1393_sp_test()|
+DROP PROCEDURE wl1393_sp_test|
+delimiter ;|
+
+################
+## Test with subqueries
+SELECT d1.f1, d1.f2 FROM t1
+LEFT JOIN (SELECT * FROM t1 ORDER BY f1 LIMIT 30) d1 on t1.f1=d1.f1
+ORDER BY d1.f2 DESC LIMIT 30;
+
+SELECT * FROM t1 WHERE f1 = (SELECT f1 FROM t1 ORDER BY 1 LIMIT 1);
+
+--error ER_SUBQUERY_NO_1_ROW
+SELECT * FROM t1 WHERE f1 = (SELECT f1 FROM t1 ORDER BY 1 LIMIT 2);
+
+DROP TABLE t1, tmp;
+DROP VIEW v1, v2;
+
+--echo # end of WL#1393 - Optimizing filesort with small limit
+
+--echo #
+--echo # Bug #58761
+--echo # Crash in Field::is_null in field.h on subquery in WHERE clause
+--echo #
+
+CREATE TABLE t1 (
+ pk INT NOT NULL AUTO_INCREMENT,
+ col_int_key INT DEFAULT NULL,
+ col_varchar_key VARCHAR(1) DEFAULT NULL,
+ PRIMARY KEY (pk),
+ KEY col_varchar_key (col_varchar_key,col_int_key)
+);
+
+INSERT INTO t1 VALUES (27,7,'x');
+INSERT INTO t1 VALUES (28,6,'m');
+INSERT INTO t1 VALUES (29,4,'c');
+
+CREATE TABLE where_subselect
+ SELECT DISTINCT `pk` AS field1 , `pk` AS field2
+ FROM t1 AS alias1
+ WHERE alias1 . `col_int_key` > 229
+ OR alias1 . `col_varchar_key` IS NOT NULL
+ GROUP BY field1, field2
+;
+
+SELECT *
+FROM where_subselect
+WHERE (field1, field2) IN (
+ SELECT DISTINCT `pk` AS field1 , `pk` AS field2
+ FROM t1 AS alias1
+ WHERE alias1 . `col_int_key` > 229
+ OR alias1 . `col_varchar_key` IS NOT NULL
+ GROUP BY field1, field2
+);
+
+DROP TABLE t1;
+DROP TABLE where_subselect;
+
+--echo # End of Bug #58761
+
+##
# Bug#35844: Covering index for ref access not compatible with ORDER BY list
#
diff --git a/mysql-test/t/order_by_sortkey.test b/mysql-test/t/order_by_sortkey.test
new file mode 100644
index 00000000000..43de028496e
--- /dev/null
+++ b/mysql-test/t/order_by_sortkey.test
@@ -0,0 +1,64 @@
+#
+# WL#1393 - ORDER BY with LIMIT tests
+#
+# A big test in a separate file, so that we can run the original
+# order_by test with --debug without hitting the timeout.
+#
+# All the sort keys fit in memory, but the addon fields do not.
+#
+CREATE TABLE t1(
+ f0 int auto_increment PRIMARY KEY,
+ f1 int,
+ f2 varchar(200)
+);
+
+INSERT INTO t1(f1, f2) VALUES
+(0,"0"),(1,"1"),(2,"2"),(3,"3"),(4,"4"),(5,"5"),
+(6,"6"),(7,"7"),(8,"8"),(9,"9"),(10,"10"),
+(11,"11"),(12,"12"),(13,"13"),(14,"14"),(15,"15"),
+(16,"16"),(17,"17"),(18,"18"),(19,"19"),(20,"20"),
+(21,"21"),(22,"22"),(23,"23"),(24,"24"),(25,"25"),
+(26,"26"),(27,"27"),(28,"28"),(29,"29"),(30,"30"),
+(31,"31"),(32,"32"),(33,"33"),(34,"34"),(35,"35"),
+(36,"36"),(37,"37"),(38,"38"),(39,"39"),(40,"40"),
+(41,"41"),(42,"42"),(43,"43"),(44,"44"),(45,"45"),
+(46,"46"),(47,"47"),(48,"48"),(49,"49"),(50,"50"),
+(51,"51"),(52,"52"),(53,"53"),(54,"54"),(55,"55"),
+(56,"56"),(57,"57"),(58,"58"),(59,"59"),(60,"60"),
+(61,"61"),(62,"62"),(63,"63"),(64,"64"),(65,"65"),
+(66,"66"),(67,"67"),(68,"68"),(69,"69"),(70,"70"),
+(71,"71"),(72,"72"),(73,"73"),(74,"74"),(75,"75"),
+(76,"76"),(77,"77"),(78,"78"),(79,"79"),(80,"80"),
+(81,"81"),(82,"82"),(83,"83"),(84,"84"),(85,"85"),
+(86,"86"),(87,"87"),(88,"88"),(89,"89"),(90,"90"),
+(91,"91"),(92,"92"),(93,"93"),(94,"94"),(95,"95"),
+(96,"96"),(97,"97"),(98,"98"),(99,"99");
+
+CREATE TEMPORARY TABLE tmp (f1 int, f2 varchar(20));
+INSERT INTO tmp SELECT f1,f2 FROM t1;
+INSERT INTO t1(f1,f2) SELECT * FROM tmp;
+INSERT INTO tmp SELECT f1,f2 FROM t1;
+INSERT INTO t1(f1,f2) SELECT * FROM tmp;
+INSERT INTO t1(f1,f2) SELECT * FROM tmp;
+INSERT INTO tmp SELECT f1,f2 FROM t1;
+INSERT INTO t1(f1,f2) SELECT * FROM tmp;
+INSERT INTO tmp SELECT f1,f2 FROM t1;
+INSERT INTO t1(f1,f2) SELECT * FROM tmp;
+INSERT INTO tmp SELECT f1,f2 FROM t1;
+INSERT INTO t1(f1,f2) SELECT * FROM tmp;
+INSERT INTO tmp SELECT f1,f2 FROM t1;
+INSERT INTO t1(f1,f2) SELECT * FROM tmp;
+INSERT INTO tmp SELECT f1,f2 FROM t1;
+INSERT INTO t1(f1,f2) SELECT * FROM tmp;
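+# The cross-inserts above grow both tables geometrically (each INSERT adds
+# the other table's current row count); at this point t1 should hold 87700
+# rows and tmp 54200.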
+
+# Test the case when only the sort keys fit in memory
+set sort_buffer_size= 32768;
+
+FLUSH STATUS;
+SHOW SESSION STATUS LIKE 'Sort%';
+
+SELECT * FROM t1 ORDER BY f2 LIMIT 100;
+
+SHOW SESSION STATUS LIKE 'Sort%';
+
+DROP TABLE t1, tmp;
diff --git a/mysql-test/t/parser.test b/mysql-test/t/parser.test
index d477843b22b..2c8cfafb90a 100644
--- a/mysql-test/t/parser.test
+++ b/mysql-test/t/parser.test
@@ -553,7 +553,7 @@ select master_pos_wait();
-- error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
select master_pos_wait(1);
-- error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
-select master_pos_wait(1, 2, 3, 4);
+select master_pos_wait(1, 2, 3, 4, 5);
-- error ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT
select rand(1, 2, 3);
diff --git a/mysql-test/t/parser_not_embedded.test b/mysql-test/t/parser_not_embedded.test
index 22e9ae5a140..3ebd23e888e 100644
--- a/mysql-test/t/parser_not_embedded.test
+++ b/mysql-test/t/parser_not_embedded.test
@@ -8,15 +8,15 @@
--write_file $MYSQLTEST_VARDIR/tmp/bug39559.sql
select 2 as expected, /*!01000/**/*/ 2 as result;
-select 1 as expected, /*!99998/**/*/ 1 as result;
+select 1 as expected, /*!999998/**/*/ 1 as result;
select 3 as expected, /*!01000 1 + */ 2 as result;
-select 2 as expected, /*!99990 1 + */ 2 as result;
+select 2 as expected, /*!999990 1 + */ 2 as result;
select 7 as expected, /*!01000 1 + /* 8 + */ 2 + */ 4 as result;
-select 8 as expected, /*!99998 1 + /* 2 + */ 4 + */ 8 as result;
+select 8 as expected, /*!999998 1 + /* 2 + */ 4 + */ 8 as result;
select 7 as expected, /*!01000 1 + /*!01000 8 + */ 2 + */ 4 as result;
-select 7 as expected, /*!01000 1 + /*!99998 8 + */ 2 + */ 4 as result;
-select 4 as expected, /*!99998 1 + /*!99998 8 + */ 2 + */ 4 as result;
-select 4 as expected, /*!99998 1 + /*!01000 8 + */ 2 + */ 4 as result;
+select 7 as expected, /*!01000 1 + /*!999998 8 + */ 2 + */ 4 as result;
+select 4 as expected, /*!999998 1 + /*!999998 8 + */ 2 + */ 4 as result;
+select 4 as expected, /*!999998 1 + /*!01000 8 + */ 2 + */ 4 as result;
select 7 as expected, /*!01000 1 + /*!01000 8 + /*!01000 error */ 16 + */ 2 + */ 4 as result;
select 4 as expected, /* 1 + /*!01000 8 + */ 2 + */ 4;
EOF
diff --git a/mysql-test/t/partition_innodb_plugin.test b/mysql-test/t/partition_innodb_plugin.test
index 4693288a4dd..2eb9a2fa2a0 100644
--- a/mysql-test/t/partition_innodb_plugin.test
+++ b/mysql-test/t/partition_innodb_plugin.test
@@ -66,6 +66,7 @@ LOCK TABLE t1 WRITE;
--echo # ALTER fails because COMPRESSED/KEY_BLOCK_SIZE
--echo # are incompatible with innodb_file_per_table = OFF;
+--replace_regex / - .*//
--error ER_GET_ERRNO
ALTER TABLE t1 ADD PARTITION PARTITIONS 1;
diff --git a/mysql-test/t/show_explain.test b/mysql-test/t/show_explain.test
new file mode 100644
index 00000000000..57d86ece4f4
--- /dev/null
+++ b/mysql-test/t/show_explain.test
@@ -0,0 +1,1138 @@
+#
+# Tests for SHOW EXPLAIN FOR functionality
+#
+--source include/have_debug.inc
+--source include/have_innodb.inc
+
+--disable_warnings
+drop table if exists t0, t1, t2, t3, t4;
+drop view if exists v1;
+--enable_warnings
+SET @old_debug= @@session.debug;
+
+#
+# Testcases in this file do not work with the embedded server. The reason is
+# that we use the following commands for synchronization:
+#
+# set @show_explain_probe_select_id=1;
+# set debug_dbug='d,show_explain_probe_join_exec_start';
+# send select count(*) from t1 where a < 100000;
+#
+# When run with mysqltest_embedded, this translates into:
+#
+# Thread1> DBUG_PUSH("d,show_explain_probe_join_exec_start");
+# Thread1> create another thread for doing "send ... reap"
+# Thread2> mysql_parse("select count(*) from t1 where a < 100000");
+#
+# That is, "select count(*) ..." is ran in a thread for which DBUG_PUSH(...)
+# has not been called. As a result, show_explain_probe_join_exec_start does not fire, and
+# "select count(*) ..." does not wait till its SHOW EXPLAIN command, and the
+# test fails.
+#
+-- source include/not_embedded.inc
+
+set debug_sync='RESET';
+
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (a int);
+insert into t1 select A.a + 10*B.a + 100*C.a from t0 A, t0 B, t0 C;
+alter table t1 add b int, add c int, add filler char(32);
+update t1 set b=a, c=a, filler='fooo';
+alter table t1 add key(a), add key(b);
+
+#
+# Try SHOW EXPLAIN for a non-existent thread
+#
+--error ER_NO_SUCH_THREAD
+show explain for 2000000000;
+
+--error ER_SET_CONSTANTS_ONLY
+show explain for (select max(a) from t0);
+
+#
+# Setup two threads and their ids
+#
+let $thr1=`select connection_id()`;
+connect (con1, localhost, root,,);
+connection con1;
+let $thr2=`select connection_id()`;
+SET @old_debug= @@session.debug;
+connection default;
+
+# SHOW EXPLAIN FOR <idle thread>
+--error ER_TARGET_NOT_EXPLAINABLE
+evalp show explain for $thr2;
+
+# SHOW EXPLAIN FOR <ourselves>
+--error ER_TARGET_NOT_EXPLAINABLE
+evalp show explain for $thr1;
+
+let $wait_condition= select State='show_explain_trap' from information_schema.processlist where id=$thr2;
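+# wait_condition.inc polls the query above until it returns true, i.e. until
+# thread $thr2 has stopped at a show_explain probe and shows the
+# 'show_explain_trap' state in the processlist.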
+
+#
+# Test SHOW EXPLAIN for simple queries
+#
+connection con1;
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+send select count(*) from t1 where a < 100000;
+
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+
+
+send select max(c) from t1 where a < 10;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+
+
+--echo # We can catch EXPLAIN, too.
+set @show_expl_tmp= @@optimizer_switch;
+set optimizer_switch='index_condition_pushdown=on,mrr=on,mrr_sort_keys=on';
+send explain select max(c) from t1 where a < 10;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set optimizer_switch= @show_expl_tmp;
+set debug_dbug=@old_debug;
+
+--echo # UNION, first branch
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+send explain select a from t0 A union select a+1 from t0 B;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+
+--echo # UNION, second branch
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+send explain select a from t0 A union select a+1 from t0 B;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+
+--echo # Uncorrelated subquery, select
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+send select a, (select max(a) from t0 B) from t0 A where a<1;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+
+--echo # Uncorrelated subquery, explain
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+send explain select a, (select max(a) from t0 B) from t0 A where a<1;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+--echo # correlated subquery, select
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+send select a, (select max(a) from t0 b where b.a+a.a<10) from t0 a where a<1;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+--echo # correlated subquery, explain
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+send select a, (select max(a) from t0 b where b.a+a.a<10) from t0 a where a<1;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+--echo # correlated subquery, select, while inside the subquery
+set @show_explain_probe_select_id=2; # <---
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+send select a, (select max(a) from t0 b where b.a+a.a<10) from t0 a where a<1;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+--echo # correlated subquery, explain, while inside the subquery
+set @show_explain_probe_select_id=2;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+send select a, (select max(a) from t0 b where b.a+a.a<10) from t0 a where a<1;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+--echo # correlated subquery, explain, while inside the subquery
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_end';
+send select a, (select max(a) from t0 b where b.a+a.a<10) from t0 a where a<1;
+connection default;
+--source include/wait_condition.inc
+--error ER_TARGET_NOT_EXPLAINABLE
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+# TODO: explain in the parent subquery when the un-correlated child has been
+# run (and has done irreversible cleanups)
+# ^^ Is this at all possible after 5.3?
+# Maybe, for 5.3 try this:
+# - run before/after the parent has invoked child's optimization
+# - run after materialization
+
+--echo # Try to do SHOW EXPLAIN for a query that runs a SET command:
+--echo # I've found experimentally that select_id==2 here...
+--echo #
+set @show_explain_probe_select_id=2;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+send set @foo= (select max(a) from t0 where sin(a) >0);
+connection default;
+--source include/wait_condition.inc
+--error ER_TARGET_NOT_EXPLAINABLE
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+--echo #
+--echo # Attempt SHOW EXPLAIN for an UPDATE
+--echo #
+create table t2 as select a as a, a as dummy from t0 limit 2;
+set @show_explain_probe_select_id=2;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+send update t2 set dummy=0 where (select max(a) from t0 where t2.a + t0.a <3) >3 ;
+connection default;
+--source include/wait_condition.inc
+--error ER_TARGET_NOT_EXPLAINABLE
+evalp show explain for $thr2;
+--source include/wait_condition.inc
+--error ER_TARGET_NOT_EXPLAINABLE
+evalp show explain for $thr2;
+connection con1;
+reap;
+drop table t2;
+set debug_dbug=@old_debug;
+
+--echo #
+--echo # Attempt SHOW EXPLAIN for a DELETE
+--echo #
+create table t2 as select a as a, a as dummy from t0 limit 2;
+set @show_explain_probe_select_id=2;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+send delete from t2 where (select max(a) from t0 where t2.a + t0.a <3) >3 ;
+connection default;
+--source include/wait_condition.inc
+--error ER_TARGET_NOT_EXPLAINABLE
+evalp show explain for $thr2;
+--source include/wait_condition.inc
+--error ER_TARGET_NOT_EXPLAINABLE
+evalp show explain for $thr2;
+connection con1;
+reap;
+drop table t2;
+set debug_dbug=@old_debug;
+
+--echo #
+--echo # Multiple SHOW EXPLAIN calls for one select
+--echo #
+create table t2 as select a as a, a as dummy from t0 limit 3;
+set @show_explain_probe_select_id=2;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+send select t2.a, ((select max(a) from t0 where t2.a + t0.a <3) >3) as SUBQ from t2;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+drop table t2;
+set debug_dbug=@old_debug;
+
+--echo #
+--echo # SHOW EXPLAIN for SELECT ... ORDER BY with "Using filesort"
+--echo #
+explain select * from t0 order by a;
+
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+set @show_explain_probe_select_id=1;
+send select * from t0 order by a;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+--echo #
+--echo # SHOW EXPLAIN for SELECT ... with "Using temporary"
+--echo #
+connection default;
+explain select distinct a from t0;
+connection con1;
+
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+set @show_explain_probe_select_id=1;
+send select distinct a from t0;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+--echo #
+--echo # SHOW EXPLAIN for SELECT ... with "Using temporary; Using filesort"
+--echo #
+connection default;
+explain select distinct a from t0;
+connection con1;
+
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+set @show_explain_probe_select_id=1;
+send select distinct a from t0;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+--echo #
+--echo # MDEV-238: SHOW EXPLAIN: Server crashes in JOIN::print_explain with FROM subquery and GROUP BY
+--echo #
+CREATE TABLE t2 ( a INT );
+INSERT INTO t2 VALUES (1),(2),(1),(4),(2);
+explain SELECT alias.a FROM t2, ( SELECT * FROM t2 ) AS alias GROUP BY alias.a;
+
+set debug_dbug='+d,show_explain_in_find_all_keys';
+send SELECT alias.a FROM t2, ( SELECT * FROM t2 ) AS alias GROUP BY alias.a;
+
+connection default;
+--source include/wait_condition.inc
+--echo # FIXED by "conservative assumptions about when QEP is available" fix:
+--echo # NOTE: current code will not show "Using join buffer":
+--error ER_TARGET_NOT_EXPLAINABLE
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+DROP TABLE t2;
+
+
+--echo #
+--echo # MDEV-239: Assertion `field_types == 0 ... ' failed in Protocol_text::store(double, uint32, String*) with
+--echo # SHOW EXPLAIN over EXPLAIN EXTENDED
+--echo #
+
+
+CREATE TABLE t2 (a INT);
+INSERT INTO t2 VALUES (1),(2),(1),(4),(2);
+
+EXPLAIN EXTENDED SELECT alias.a FROM t2, ( SELECT * FROM t2 ) AS alias GROUP BY alias.a ;
+
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_end';
+send EXPLAIN EXTENDED SELECT alias.a FROM t2, ( SELECT * FROM t2 ) AS alias GROUP BY alias.a ;
+
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+DROP TABLE t2;
+
+
+--echo #
+--echo # MDEV-240: SHOW EXPLAIN: Assertion `this->optimized == 2' failed in
+--echo # JOIN::print_explain on query with a JOIN, TEMPTABLE view,
+--echo #
+CREATE TABLE t3 (a INT);
+CREATE ALGORITHM=TEMPTABLE VIEW v1 AS SELECT * FROM t3;
+INSERT INTO t3 VALUES (8);
+CREATE TABLE t2 (b INT);
+INSERT INTO t2 VALUES (4),(5),(6),(7),(8),(9);
+explain SELECT * FROM v1, t2;
+
+set @show_explain_probe_select_id=2;
+set debug_dbug='+d,show_explain_probe_join_exec_end';
+send SELECT * FROM v1, t2;
+
+connection default;
+--source include/wait_condition.inc
+--error ER_TARGET_NOT_EXPLAINABLE
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+DROP VIEW v1;
+DROP TABLE t2, t3;
+
+--echo #
+--echo # MDEV-267: SHOW EXPLAIN: Server crashes in JOIN::print_explain on most of queries
+--echo #
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_end';
+send select sleep(1);
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+
+--echo #
+--echo # Same as above, but try another reason for JOIN to be degenerate
+--echo #
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_end';
+send select * from t0 where 1>10;
+connection default;
+--source include/wait_condition.inc
+--error ER_TARGET_NOT_EXPLAINABLE
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+--echo #
+--echo # Same as above, but try another reason for JOIN to be degenerate (2)
+--echo #
+create table t3(a int primary key);
+insert into t3 select a from t0;
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_end';
+send select * from t0,t3 where t3.a=112233;
+connection default;
+--source include/wait_condition.inc
+--error ER_TARGET_NOT_EXPLAINABLE
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+drop table t3;
+
+--echo #
+--echo # MDEV-270: SHOW EXPLAIN: server crashes in JOIN::print_explain on a query with
+--echo # select tables optimized away
+--echo #
+
+CREATE TABLE t2 (pk INT PRIMARY KEY, a INT ) ENGINE=MyISAM;
+INSERT INTO t2 VALUES
+ (1,4),(2,62),(3,7),(4,1),(5,0),(6,7),(7,7),(8,1),(9,7),(10,1),
+ (11,5),(12,2),(13,0),(14,1),(15,8),(16,1),(17,1),(18,9),(19,1),(20,5) ;
+
+explain SELECT * FROM t2 WHERE a =
+ (SELECT MAX(a) FROM t2
+ WHERE pk= (SELECT MAX(pk) FROM t2 WHERE pk = 3)
+ );
+
+set @show_explain_probe_select_id=2;
+set debug_dbug='+d,show_explain_probe_do_select';
+send SELECT * FROM t2 WHERE a =
+ (SELECT MAX(a) FROM t2
+ WHERE pk= (SELECT MAX(pk) FROM t2 WHERE pk = 3)
+ );
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+drop table t2;
+
+
+--echo #
+--echo # MDEV-273: SHOW EXPLAIN: server crashes in JOIN::print_explain on a query with impossible WHERE
+--echo #
+CREATE TABLE t2 (a1 INT, KEY(a1)) ENGINE=MyISAM;
+INSERT INTO t2 VALUES
+ (4),(6),(7),(1),(0),(7),(7),(1),(7),(1),
+ (5),(2),(0),(1),(8),(1),(1),(9),(1),(5);
+
+CREATE TABLE t3 (b1 INT) ENGINE=MyISAM;
+INSERT INTO t3 VALUES
+ (4),(5),(8),(4),(8),(2),(9),(6),(4),(8),
+ (3),(5),(9),(6),(8),(3),(2),(6),(3),(1),
+ (4),(3),(1),(7),(0),(0),(9),(5),(9),(0),
+ (2),(2),(5),(9),(1),(4),(8),(6),(5),(5),
+ (1),(7),(2),(8),(9),(3),(2),(6),(6),(5),
+ (4),(3),(2),(7),(4),(6),(0),(8),(5),(8),
+ (2),(9),(7),(5),(7),(0),(4),(3),(1),(0),
+ (6),(2),(8),(3),(7),(3),(5),(5),(1),(2),
+ (1),(7),(1),(9),(9),(8),(3);
+CREATE TABLE t4 (c1 INT) ENGINE=MyISAM;
+
+EXPLAIN
+SELECT count(*) FROM t2, t3
+WHERE a1 < ALL (
+ SELECT a1 FROM t2
+ WHERE a1 IN ( SELECT a1 FROM t2, t4 )
+);
+
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_do_select';
+send
+SELECT count(*) FROM t2, t3
+WHERE a1 < ALL (
+ SELECT a1 FROM t2
+ WHERE a1 IN ( SELECT a1 FROM t2, t4 )
+);
+
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+drop table t2, t3, t4;
+
+--echo #
+--echo # MDEV-275: SHOW EXPLAIN: server crashes in JOIN::print_explain with IN subquery and aggregate function
+--echo #
+CREATE TABLE t2 ( `pk` INT NOT NULL PRIMARY KEY, `a1` INT NOT NULL, KEY(`a1`)) ENGINE=MyISAM;
+INSERT INTO t2 VALUES
+ (1,5),(2,4),(3,6),(4,9),(5,2),(6,8),(7,4),(8,8),(9,0),(10,43),
+ (11,23),(12,3),(13,45),(14,16),(15,2),(16,33),(17,2),(18,5),(19,9),(20,2);
+
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_end';
+send
+ SELECT * FROM t2 WHERE (5, 78) IN (SELECT `a1`, MAX(`a1`) FROM t2 GROUP BY `a1`);
+connection default;
+--source include/wait_condition.inc
+--error ER_TARGET_NOT_EXPLAINABLE
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+DROP TABLE t2;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-305: SHOW EXPLAIN: ref returned by SHOW EXPLAIN is different from the normal EXPLAIN ('const' vs empty string)
+--echo #
+CREATE TABLE t1(a INT, KEY(a));
+INSERT INTO t1 VALUES (3),(1),(5),(1);
+
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+
+send SELECT 'test' FROM t1 WHERE a=1;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+DROP TABLE t1;
+
+
+--echo #
+--echo # MDEV-299: SHOW EXPLAIN: Plan produced by SHOW EXPLAIN changes back and forth during query execution
+--echo #
+
+create table t1 (key1 int, col1 int, col2 int, filler char(100), key(key1));
+insert into t1 select A.a+ 10 * B.a, 10, 10, 'filler-data' from t0 A, t0 B;
+
+# Make a handful of rows match (col1=3), alternating small and big ranges:
+update t1 set col1=3, col2=10 where key1=1; # small range
+update t1 set col1=3, col2=1000 where key1=2; # big range
+update t1 set col1=3, col2=10 where key1=3; # small range again
+update t1 set col1=3, col2=1000 where key1=4; # big range
+
+set @tmp_mdev299_jcl= @@join_cache_level;
+set join_cache_level=0;
+
+explain select count(*) from t1 A, t1 B where B.key1 < A.col2 and A.col1=3 AND B.col2 + 1 < 100;
+
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_test_if_quick_select';
+
+send
+select count(*) from t1 A, t1 B where B.key1 < A.col2 and A.col1=3 AND B.col2 + 1 < 100;
+
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+
+set debug_dbug=@old_debug;
+drop table t1;
+
+--echo #
+--echo # MDEV-297: SHOW EXPLAIN: Server gets stuck until timeout occurs while
+--echo # executing SHOW INDEX and SHOW EXPLAIN in parallel
+--echo #
+CREATE TABLE t1(a INT, b INT, c INT, KEY(a), KEY(b), KEY(c));
+INSERT INTO t1 (a) VALUES (3),(1),(5),(1);
+
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+
+send SHOW INDEX FROM t1;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-324: SHOW EXPLAIN: Plan produced by SHOW EXPLAIN for a query with TEMPTABLE view
+--echo # loses 'DERIVED' line on the way without saying that the plan was already deleted
+--echo #
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+CREATE ALGORITHM=TEMPTABLE VIEW v1 AS SELECT * FROM t1;
+
+EXPLAIN SELECT a + 1 FROM v1;
+
+set debug_dbug='+d,show_explain_probe_join_tab_preread';
+set @show_explain_probe_select_id=1;
+
+send
+ SELECT a + 1 FROM v1;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+DROP VIEW v1;
+DROP TABLE t1;
+
+
+--echo #
+--echo # MDEV-323: SHOW EXPLAIN: Plan produced by SHOW EXPLAIN loses
+--echo # 'UNION RESULT' line on the way without saying that the plan was already deleted
+--echo #
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (4),(6);
+
+EXPLAIN
+SELECT a FROM t1 WHERE a IN ( SELECT 1+SLEEP(0.01) UNION SELECT 2 );
+
+set debug_dbug='+d,show_explain_probe_union_read';
+send
+SELECT a FROM t1 WHERE a IN ( SELECT 1+SLEEP(0.01) UNION SELECT 2 );
+
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+
+connection con1;
+reap;
+
+set debug_dbug=@old_debug;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-327: SHOW EXPLAIN: Different select_type in plans produced by SHOW EXPLAIN
+--echo # and standard EXPLAIN: 'SUBQUERY' vs 'DEPENDENT SUBQUERY'
+--echo #
+CREATE TABLE t1 (a INT) ENGINE=Aria;
+INSERT INTO t1 VALUES
+(4),(6),(3),(5),(3),(246),(2),(9),(3),(8),
+(1),(8),(8),(5),(7),(5),(1),(6),(2),(9);
+
+CREATE TABLE t2 (b INT) ENGINE=Aria;
+INSERT INTO t2 VALUES
+(1),(7),(4),(7),(0),(2),(9),(4),(0),(9),
+(1),(3),(8),(8),(18),(84),(6),(3),(6),(6);
+
+EXPLAIN
+SELECT * FROM t1, ( SELECT * FROM t2 ) AS alias
+WHERE a < ALL ( SELECT b FROM t1, t2 WHERE a = b );
+
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+--send
+SELECT * FROM t1, ( SELECT * FROM t2 ) AS alias
+WHERE a < ALL ( SELECT b FROM t1, t2 WHERE a = b );
+
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+DROP TABLE t1, t2;
+
+--echo #
+--echo # Test that SHOW EXPLAIN will print 'Distinct'.
+--echo #
+CREATE TABLE t1 (a int(10) unsigned not null primary key,b int(10) unsigned);
+INSERT INTO t1 VALUES (1,1),(2,1),(3,1),(4,1);
+
+CREATE TABLE t3 (a int(10) unsigned, key(A), b text);
+INSERT INTO t3 VALUES (1,'1'),(2,'2');
+
+create temporary table t4 select * from t3;
+insert into t3 select * from t4;
+insert into t4 select * from t3;
+insert into t3 select * from t4;
+insert into t4 select * from t3;
+insert into t3 select * from t4;
+insert into t4 select * from t3;
+insert into t3 select * from t4;
+explain select distinct t1.a from t1,t3 where t1.a=t3.a;
+
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+--send
+select distinct t1.a from t1,t3 where t1.a=t3.a;
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+drop table t1,t3,t4;
+
+--echo #
+--echo # ---------- SHOW EXPLAIN and permissions -----------------
+--echo #
+grant ALL on test.* to test2@localhost;
+
+# Give the user the SUPER privilege so it can set the debug_dbug variable.
+grant super on *.* to test2@localhost;
+connect (con2, localhost, test2,,);
+connection con1;
+
+--echo #
+--echo # First, make sure that user 'test2' cannot do SHOW EXPLAIN on us
+--echo #
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+send
+select * from t0 where a < 3;
+
+connection default;
+--source include/wait_condition.inc
+
+connection con2;
+--error ER_SPECIFIC_ACCESS_DENIED_ERROR
+evalp show explain for $thr2;
+
+connection default;
+evalp show explain for $thr2;
+
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+--echo #
+--echo # Check that user test2 can do SHOW EXPLAIN on its own queries
+--echo #
+connect (con3, localhost, test2,,);
+
+connection con2;
+let $thr_con2=`select connection_id()`;
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+send
+select * from t0 where a < 3;
+
+connection con1;
+let $save_wait_condition= $wait_condition;
+let $wait_condition= select State='show_explain_trap' from information_schema.processlist where id=$thr_con2;
+--source include/wait_condition.inc
+
+connection con3;
+evalp show explain for $thr_con2;
+
+connection con2;
+reap;
+
+connection con1;
+
+disconnect con3;
+let $wait_condition= $save_wait_condition;
+--echo #
+--echo # Now, grant test2 a PROCESSLIST permission, and see that he's able to observe us
+--echo #
+disconnect con2;
+grant process on *.* to test2@localhost;
+connect (con2, localhost, test2,,);
+connection con1;
+set debug_dbug=@old_debug;
+
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+send
+select * from t0 where a < 3;
+
+connection default;
+--source include/wait_condition.inc
+
+connection con2;
+evalp show explain for $thr2;
+
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+revoke all privileges on test.* from test2@localhost;
+drop user test2@localhost;
+
+disconnect con2;
+--echo #
+--echo # Test that it is possible to KILL a SHOW EXPLAIN command that's waiting
+--echo # on its target thread
+--echo #
+connect (con2, localhost, root,,);
+connect (con3, localhost, root,,);
+connection con2;
+create table t1 (pk int primary key, data char(64)) engine=innodb;
+insert into t1 select A.a + 10 * B.a + 100 * C.a, 'data1' from t0 A, t0 B, t0 C;
+
+--echo # Lock two threads
+set autocommit=0;
+select * from t1 where pk between 10 and 20 for update;
+
+connection con1;
+set autocommit=0;
+# This will block, waiting for the row locks held by con2
+send
+select * from t1 where pk between 10 and 20 for update;
+
+# run SHOW EXPLAIN on the blocked thread
+connection default;
+let $save_wait_condition= $wait_condition;
+let $wait_condition= select State='Sending data' from information_schema.processlist where id=$thr2;
+let $thr_default=`select connection_id()`;
+--source include/wait_condition.inc
+--echo # do: send_eval show explain for thr2;
+--disable_query_log
+send_eval show explain for $thr2;
+--enable_query_log
+
+# kill the SHOW EXPLAIN command
+connection con3;
+let $wait_condition= select State='show_explain' from information_schema.processlist where id=$thr_default;
+--source include/wait_condition.inc
+evalp kill query $thr_default;
+
+connection default;
+--error ER_QUERY_INTERRUPTED
+reap;
+
+connection con2;
+rollback;
+
+connection con1;
+reap;
+
+drop table t1;
+disconnect con3;
+disconnect con2;
+let $wait_condition= $save_wait_condition;
+
+--echo #
+--echo # Check that the I_S table is invisible
+--echo #
+select table_name from information_schema.tables where table_schema='information_schema' and table_name like '%explain%';
+
+--echo #
+--echo # MDEV-325: SHOW EXPLAIN: Plan produced by SHOW EXPLAIN is different from standard EXPLAIN: type ALL vs 'index_merge'..
+--echo #
+CREATE TABLE t1 (a INT, b INT, KEY(a), KEY(b)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES
+(8,0),(128,5050),(5372,8),(234,7596),(2,0),(2907,8930),(1,0),
+(0,5224),(8,7638),(960,5),(9872,1534),(0,2295),(3408,9809),
+(7,0),(1168,0),(2089,5570),(0,205),(88,1018),(0,26528),
+(0,0),(4,5567),(1444,145),(6,0),(1,7535),(7793,534),(70,9),
+(178,1),(44,5),(189,0),(3,0);
+
+EXPLAIN
+SELECT a+SLEEP(0.01) FROM t1
+WHERE a IN ( 255, 0 ) OR b BETWEEN 6 AND 129
+ORDER BY b;
+
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+--send
+SELECT a+SLEEP(0.01) FROM t1
+WHERE a IN ( 255, 0 ) OR b BETWEEN 6 AND 129
+ORDER BY b;
+
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+
+connection con1;
+reap;
+set debug_dbug=@old_debug;
+
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_do_select';
+--send
+SELECT a+SLEEP(0.01) FROM t1
+WHERE a IN ( 255, 0 ) OR b BETWEEN 6 AND 129
+ORDER BY b;
+
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+
+connection con1;
+reap;
+
+set debug_dbug=@old_debug;
+
+drop table t1;
+
+--echo #
+--echo # MDEV-298: SHOW EXPLAIN: Plan returned by SHOW EXPLAIN only contains
+--echo # 'Using temporary' while the standard EXPLAIN says 'Using temporary; Using filesort'
+--echo #
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),
+ (10),(11),(12),(13),(14),(15),(16);
+INSERT INTO t1 SELECT t11.a FROM t1 t11, t1 t12, t1 t13;
+
+EXPLAIN SELECT a FROM t1 GROUP BY a;
+
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+--send
+SELECT a FROM t1 GROUP BY a;
+
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+
+connection con1;
+reap;
+
+set debug_dbug=@old_debug;
+
+drop table t1;
+
+--echo #
+--echo # MDEV-408: SHOW EXPLAIN: Some values are chopped off in SHOW EXPLAIN output
+--echo #
+CREATE TABLE t1 (a INT, b VARCHAR(35)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (3989,'Abilene'),(3873,'Akron');
+
+CREATE TABLE t2 (c INT, d VARCHAR(52) PRIMARY KEY, KEY(c)) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (86,'English'),(87,'Russian');
+
+explain SELECT SUM(a + SLEEP(0.1)) FROM t1 WHERE a IN ( SELECT c FROM t2 WHERE d < b ) OR b < 's';
+
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+--send
+SELECT SUM(a + SLEEP(0.1)) FROM t1 WHERE a IN ( SELECT c FROM t2 WHERE d < b ) OR b < 's';
+
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+
+connection con1;
+reap;
+
+set debug_dbug=@old_debug;
+drop table t1, t2;
+
+--echo #
+--echo # MDEV-412: SHOW EXPLAIN: Server crashes in JOIN::print_explain on a query with inner join and ORDER BY the same column twice
+--echo #
+CREATE TABLE t1 (a INT PRIMARY KEY, b VARCHAR(3), KEY(b)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES
+(3795,'USA'),(3913,'USA'),(3846,'ITA'),(4021,'USA'),(4005,'RUS'),(4038,'USA'),
+(3825,'USA'),(3840,'USA'),(3987,'USA'),(3807,'USA'),(3896,'USA'),(4052,'USA'),
+(3973,'USA'),(3982,'ITA'),(3965,'USA'),(3852,'RUS'),(4006,'USA'),(3800,'USA'),
+(4020,'USA'),(4040,'USA'),(3916,'USA'),(3817,'USA'),(3885,'USA'),(3802,'USA'),
+(4009,'ITA'),(3895,'USA'),(3963,'RUS'),(4045,'USA'),(3988,'USA'),(3815,'USA'),
+(4063,'USA'),(3978,'USA'),(4019,'USA'),(3954,'USA'),(3950,'USA'),(3974,'ITA'),
+(4054,'USA'),(4061,'RUS'),(3976,'USA'),(3966,'USA'),(3957,'USA'),(3981,'USA'),
+(3923,'USA'),(3876,'USA'),(3819,'USA'),(3877,'USA'),(3829,'ITA'),(3964,'USA'),
+(4053,'RUS'),(3917,'USA'),(3874,'USA'),(4023,'USA'),(4001,'USA'),(3872,'USA'),
+(3890,'USA'),(3962,'USA'),(3886,'USA'),(4026,'ITA'),(3869,'USA'),(3937,'RUS'),
+(3975,'USA'),(3944,'USA'),(3908,'USA'),(3867,'USA'),(3947,'USA'),(3838,'USA'),
+(3796,'USA'),(3893,'USA'),(3920,'ITA'),(3994,'USA'),(3875,'RUS'),(4011,'USA'),
+(4013,'USA'),(3810,'USA'),(3834,'USA'),(3968,'USA'),(3931,'USA'),(3839,'USA'),
+(4042,'USA'),(4039,'ITA'),(3811,'USA'),(3837,'RUS'),(4041,'USA'),(3884,'USA'),
+(3894,'USA'),(3879,'USA'),(3942,'USA'),(3959,'USA'),(3814,'USA'),(4044,'USA'),
+(3971,'ITA'),(3823,'USA'),(3793,'RUS'),(3855,'USA'),(3905,'USA'),(3865,'USA'),
+(4046,'USA'),(3990,'USA'),(4022,'USA'),(3833,'USA'),(3918,'USA'),(4064,'ITA'),
+(3821,'USA'),(3836,'RUS'),(3921,'USA'),(3914,'USA'),(3888,'USA');
+
+CREATE TABLE t2 (c VARCHAR(3) PRIMARY KEY) ENGINE=MyISAM;
+INSERT INTO t2 VALUES ('USA');
+
+CREATE TABLE t3 (d VARCHAR(3), e VARCHAR(52), PRIMARY KEY (d,e)) ENGINE=MyISAM;
+INSERT INTO t3 VALUES
+('JPN','Japanese'),('KOR','Korean'),('POL','Polish'),('PRT','Portuguese'),
+('ESP','Spanish'),('FRA','French'),('VNM','Vietnamese');
+
+explain
+SELECT b AS field1, b AS field2 FROM t1, t2, t3 WHERE d = b ORDER BY field1, field2;
+
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_do_select';
+
+send
+SELECT b AS field1, b AS field2 FROM t1, t2, t3 WHERE d = b ORDER BY field1, field2;
+
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+
+connection con1;
+reap;
+
+set debug_dbug=@old_debug;
+
+DROP TABLE t1,t2,t3;
+
+
+--echo #
+--echo # MDEV-423: SHOW EXPLAIN: 'Using where' for a subquery is shown in EXPLAIN, but not in SHOW EXPLAIN output
+--echo #
+CREATE TABLE t1 (a INT) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (7),(0),(9),(3),(4),(2),(5),(7),(0),(9),(3),(4),(2),(5);
+
+CREATE TABLE t2 (b INT, c INT) ENGINE=MyISAM;
+INSERT INTO t2 VALUES
+(0,4),(8,6),(1,3),(8,5),(9,3),(24,246),(6,2),(1,9),(6,3),(2,8),
+(4,1),(8,8),(4,8),(4,5),(7,7),(4,5),(1,1),(9,6),(4,2),(8,9);
+
+create table t3 like t2;
+insert into t3 select * from t2;
+
+explain
+SELECT max(a+b+c) FROM t1 AS alias1, ( SELECT * FROM t2 ) AS alias
+WHERE EXISTS ( SELECT * FROM t3 WHERE b = c ) OR a <= 10;
+
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+
+send
+SELECT max(a+b+c) FROM t1 AS alias1, ( SELECT * FROM t2 ) AS alias
+WHERE EXISTS ( SELECT * FROM t3 WHERE b = c ) OR a <= 10;
+
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+
+connection con1;
+reap;
+
+set debug_dbug=@old_debug;
+DROP TABLE t1,t2,t3;
+
+--echo #
+--echo # MDEV-416: Server crashes in SQL_SELECT::cleanup on EXPLAIN with SUM ( DISTINCT ) in a non-correlated subquery (5.5-show-explain tree)
+--echo #
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+
+CREATE TABLE t2 (b INT);
+INSERT INTO t2 VALUES (8),(9);
+
+EXPLAIN SELECT * FROM t1
+WHERE ( 8, 89 ) IN ( SELECT b, SUM( DISTINCT b ) FROM t2 GROUP BY b );
+
+DROP TABLE t1,t2;
+
+--echo #
+--echo # Check if queries in non-default charsets work.
+--echo #
+
+set names cp1251;
+# The two characters below are Russian letters with codes E3 FB in the cp1251 encoding.
+select charset('ãû');
+select hex('ãû');
+
+set @show_explain_probe_select_id=1;
+set debug_dbug='+d,show_explain_probe_join_exec_start';
+
+send
+select * from t0 where length('ãû') = a;
+
+connection default;
+set names utf8;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+set names default;
+
+connection con1;
+# The constant in the reaped output should be the two letters, the last one looking like 'bl'
+reap;
+
+set debug_dbug=@old_debug;
+set names default;
+
+--echo #
+--echo # MDEV-462: SHOW EXPLAIN: Assertion `table_list->table' fails in find_field_in_table_ref if FOR contains a non-numeric value
+--echo #
+--error ER_SET_CONSTANTS_ONLY
+show explain for foo;
+
+--echo # End
+drop table t0;
+
+connection default;
+disconnect con1;
+set debug_sync='RESET';
+
diff --git a/mysql-test/t/show_explain_ps.test b/mysql-test/t/show_explain_ps.test
new file mode 100644
index 00000000000..4ad1e4304de
--- /dev/null
+++ b/mysql-test/t/show_explain_ps.test
@@ -0,0 +1,51 @@
+#
+# Test how SHOW EXPLAIN is represented in performance schema
+#
+--source include/have_debug.inc
+--source include/have_perfschema.inc
+# Like all other perfschema tests, this one does not run on the embedded server:
+--source include/not_embedded.inc
+
+--disable_warnings
+drop table if exists t0, t1;
+--enable_warnings
+
+select * from performance_schema.setup_instruments where name like '%show_explain%';
+
+--echo # We've got no instances
+select * from performance_schema.cond_instances where name like '%show_explain%';
+
+--echo # Check out if our cond is hit.
+
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+let $thr1=`select connection_id()`;
+connect (con1, localhost, root,,);
+connection con1;
+let $thr2=`select connection_id()`;
+connection default;
+
+let $wait_condition= select State='show_explain_trap' from information_schema.processlist where id=$thr2;
+
+#
+# Test SHOW EXPLAIN for simple queries
+#
+connection con1;
+set @show_explain_probe_select_id=1;
+set debug_dbug='d,show_explain_probe_join_exec_start';
+send select count(*) from t0 where a < 100000;
+
+connection default;
+--source include/wait_condition.inc
+evalp show explain for $thr2;
+connection con1;
+reap;
+
+set debug_dbug='';
+
+select event_name
+from performance_schema.events_waits_history_long
+where event_name='wait/synch/cond/sql/show_explain';
+
+drop table t0;
diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test
index 0a368826ee7..9db6709a8d2 100644
--- a/mysql-test/t/sp.test
+++ b/mysql-test/t/sp.test
@@ -7394,7 +7394,7 @@ begin
/*! select 2; */
select 3;
/*!00000 select 4; */
- /*!99999 select 5; */
+ /*!999999 select 5; */
end
$$
@@ -7402,7 +7402,7 @@ create procedure proc_25411_b(
/* real comment */
/*! p1 int, */
/*!00000 p2 int */
-/*!99999 ,p3 int */
+/*!999999 ,p3 int */
)
begin
select p1, p2;
@@ -7411,11 +7411,11 @@ $$
create procedure proc_25411_c()
begin
- select 1/*!,2*//*!00000,3*//*!99999,4*/;
- select 1/*! ,2*//*!00000 ,3*//*!99999 ,4*/;
- select 1/*!,2 *//*!00000,3 *//*!99999,4 */;
- select 1/*! ,2 *//*!00000 ,3 *//*!99999 ,4 */;
- select 1 /*!,2*/ /*!00000,3*/ /*!99999,4*/ ;
+ select 1/*!,2*//*!00000,3*//*!999999,4*/;
+ select 1/*! ,2*//*!00000 ,3*//*!999999 ,4*/;
+ select 1/*!,2 *//*!00000,3 *//*!999999,4 */;
+ select 1/*! ,2 *//*!00000 ,3 *//*!999999 ,4 */;
+ select 1 /*!,2*/ /*!00000,3*/ /*!999999,4*/ ;
end
$$
diff --git a/mysql-test/t/stat_tables.test b/mysql-test/t/stat_tables.test
new file mode 100644
index 00000000000..848fa272d39
--- /dev/null
+++ b/mysql-test/t/stat_tables.test
@@ -0,0 +1,207 @@
+--source include/have_stat_tables.inc
+
+select @@global.use_stat_tables;
+select @@session.use_stat_tables;
+
+set @save_use_stat_tables=@@use_stat_tables;
+
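+# use_stat_tables modes (as implemented in this tree): 'never' uses only the
+# storage engine's own statistics, 'complementary' consults the stat tables
+# where engine statistics are missing, and 'preferably' favours the data in
+# mysql.table_stats / mysql.column_stats / mysql.index_stats.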
+set use_stat_tables='preferably';
+
+--disable_warnings
+DROP DATABASE IF EXISTS dbt3_s001;
+--enable_warnings
+
+CREATE DATABASE dbt3_s001;
+
+use dbt3_s001;
+
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='extended_keys=off';
+
+--disable_query_log
+--disable_result_log
+--disable_warnings
+--source include/dbt3_s001.inc
+create index i_p_retailprice on part(p_retailprice);
+delete from mysql.table_stats;
+delete from mysql.column_stats;
+delete from mysql.index_stats;
+ANALYZE TABLE
+customer, lineitem, nation, orders, part, partsupp, region, supplier;
+FLUSH TABLE mysql.table_stats, mysql.index_stats;
+--enable_warnings
+--enable_result_log
+--enable_query_log
+
+select * from mysql.table_stats;
+select * from mysql.index_stats;
+
+set optimizer_switch=@save_optimizer_switch;
+
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='index_condition_pushdown=off';
+
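+# TPC-H/DBT-3 Q5 ("local supplier volume"), run against the dbt3_s001
+# (scale factor 0.001) data set loaded above.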
+let $Q5=
+select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue
+from customer, orders, lineitem, supplier, nation, region
+where c_custkey = o_custkey and l_orderkey = o_orderkey
+ and l_suppkey = s_suppkey and c_nationkey = s_nationkey
+ and s_nationkey = n_nationkey and n_regionkey = r_regionkey
+ and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
+ and o_orderdate < date '1995-01-01' + interval '1' year
+group by n_name
+order by revenue desc;
+
+eval EXPLAIN $Q5;
+eval $Q5;
+
+set optimizer_switch=@save_optimizer_switch;
+
+delete from mysql.index_stats;
+
+--disable_query_log
+--disable_result_log
+--disable_warnings
+ANALYZE TABLE
+customer, lineitem, nation, orders, part, partsupp, region, supplier;
+FLUSH TABLE mysql.table_stats, mysql.index_stats;
+--enable_warnings
+--enable_result_log
+--enable_query_log
+
+select * from mysql.table_stats;
+select * from mysql.index_stats;
+
+select * from mysql.table_stats where table_name='orders';
+select * from mysql.index_stats where table_name='orders';
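+# Cross-check the collected statistics: avg_frequency is the average number
+# of rows per distinct value of an index prefix, so
+#   cardinality / avg_frequency ~= number of distinct values,
+# and each ratio below should match the corresponding COUNT(DISTINCT ...).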
+select (select cardinality from mysql.table_stats where table_name='orders') /
+ (select avg_frequency from mysql.index_stats
+ where index_name='i_o_orderdate' and prefix_arity=1) as n_distinct;
+select count(distinct o_orderdate) from orders;
+select (select cardinality from mysql.table_stats where table_name='orders') /
+ (select avg_frequency from mysql.index_stats
+ where index_name='i_o_custkey' and prefix_arity=1) as n_distinct;
+select count(distinct o_custkey) from orders;
+show index from orders;
+select index_name, column_name, cardinality from information_schema.statistics
+ where table_name='orders';
+
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='index_condition_pushdown=off';
+
+eval EXPLAIN $Q5;
+eval $Q5;
+
+set optimizer_switch=@save_optimizer_switch;
+
+let $Q8=
+select o_year,
+ sum(case when nation = 'UNITED STATES' then volume else 0 end) /
+ sum(volume) as mkt_share
+from (select extract(year from o_orderdate) as o_year,
+ l_extendedprice * (1-l_discount) as volume,
+ n2.n_name as nation
+ from part, supplier, lineitem, orders, customer,
+ nation n1, nation n2, region
+ where p_partkey = l_partkey and s_suppkey = l_suppkey
+ and l_orderkey = o_orderkey and o_custkey = c_custkey
+ and c_nationkey = n1.n_nationkey and n1.n_regionkey = r_regionkey
+ and r_name = 'AMERICA' and s_nationkey = n2.n_nationkey
+ and o_orderdate between date '1995-01-01' and date '1996-12-31'
+ and p_type = 'STANDARD BRUSHED STEEL' ) as all_nations
+group by o_year
+order by o_year;
+
+eval EXPLAIN $Q8;
+eval $Q8;
+
+
+let $Q9=
+select nation, o_year, sum(amount) as sum_profit
+from (select n_name as nation,
+ extract(year from o_orderdate) as o_year,
+ l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount
+ from part, supplier, lineitem, partsupp, orders, nation
+ where s_suppkey = l_suppkey and ps_suppkey = l_suppkey
+ and ps_partkey = l_partkey and p_partkey = l_partkey
+ and o_orderkey = l_orderkey and s_nationkey = n_nationkey
+ and p_name like '%green%') as profit
+group by nation, o_year
+order by nation, o_year desc;
+
+eval EXPLAIN $Q9;
+eval $Q9;
+
+
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='extended_keys=on';
+
+let $QQ1=
+select o_orderkey, p_partkey
+ from part, lineitem, orders
+ where p_retailprice > 1100 and o_orderdate='1997-01-01'
+ and o_orderkey=l_orderkey and p_partkey=l_partkey;
+
+eval EXPLAIN $QQ1;
+eval $QQ1;
+
+set optimizer_switch=@save_optimizer_switch;
+
+
+DROP DATABASE dbt3_s001;
+
+use test;
+
+--echo #
+--echo # Bug mdev-473: ANALYZE table locked for write
+--echo #
+
+set use_stat_tables='complementary';
+
+create table t1 (i int);
+
+lock table t1 write;
+analyze table t1;
+alter table t1 add column a varchar(8);
+
+drop table t1;
+
+--echo #
+--echo # Bug mdev-487: memory leak in ANALYZE with stat tables
+--echo #
+
+SET use_stat_tables = 'preferably';
+
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2);
+DELETE FROM t1 WHERE a=1;
+
+ANALYZE TABLE t1;
+
+DROP TABLE t1;
+
+--echo #
+--echo # Bug mdev-518: corrupted/missing statistical tables
+--echo #
+
+CREATE TABLE t1 (i int) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1),(2);
+
+FLUSH TABLE t1;
+SET use_stat_tables='never';
+EXPLAIN SELECT * FROM t1;
+
+--move_file $MYSQLTEST_VARDIR/mysqld.1/data/mysql/table_stats.MYD $MYSQLTEST_VARDIR/mysqld.1/data/mysql/table_stats.MYD.save
+
+FLUSH TABLES;
+SET use_stat_tables='preferably';
+--disable_warnings
+EXPLAIN SELECT * FROM t1;
+--enable_warnings
+
+# Cleanup
+--move_file $MYSQLTEST_VARDIR/mysqld.1/data/mysql/table_stats.MYD.save $MYSQLTEST_VARDIR/mysqld.1/data/mysql/table_stats.MYD
+DROP TABLE t1;
+
+set use_stat_tables=@save_use_stat_tables;
+
diff --git a/mysql-test/t/stat_tables_disabled.test b/mysql-test/t/stat_tables_disabled.test
new file mode 100644
index 00000000000..c9d923f903b
--- /dev/null
+++ b/mysql-test/t/stat_tables_disabled.test
@@ -0,0 +1,78 @@
+--source include/have_innodb.inc
+
+SET SESSION STORAGE_ENGINE='InnoDB';
+
+select @@global.use_stat_tables;
+select @@session.use_stat_tables;
+
+set @save_use_stat_tables=@@use_stat_tables;
+
+set use_stat_tables='preferably';
+
+--disable_warnings
+DROP DATABASE IF EXISTS dbt3_s001;
+--enable_warnings
+
+CREATE DATABASE dbt3_s001;
+
+use dbt3_s001;
+
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='extended_keys=off';
+
+--disable_query_log
+--disable_result_log
+--disable_warnings
+--source include/dbt3_s001.inc
+delete from mysql.table_stats;
+delete from mysql.column_stats;
+delete from mysql.index_stats;
+ANALYZE TABLE
+customer, lineitem, nation, orders, part, partsupp, region, supplier;
+--enable_warnings
+--enable_result_log
+--enable_query_log
+
+--echo #
+--echo # Bug mdev-503: optimizer ignores setting use_stat_tables='preferably'
+--echo #
+
+flush tables
+customer, lineitem, nation, orders, part, partsupp, region, supplier;
+
+let $Q3S=
+select sql_calc_found_rows straight_join
+ l_orderkey, sum(l_extendedprice*(1-l_discount)) as revenue,
+ o_orderdate, o_shippriority
+from orders, customer, lineitem
+where c_mktsegment = 'BUILDING' and c_custkey = o_custkey
+ and l_orderkey = o_orderkey and o_orderdate < date '1995-03-15'
+ and l_shipdate > date '1995-03-15'
+group by l_orderkey, o_orderdate, o_shippriority
+order by revenue desc, o_orderdate
+limit 10;
+
+set use_stat_tables='never';
+--replace_column 9 #
+eval EXPLAIN $Q3S;
+
+set use_stat_tables='preferably';
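+# The row estimate in the plan may vary by one once the stat tables are
+# consulted; --replace_result below normalizes it (2 -> 1).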
+--replace_result 2 1
+eval EXPLAIN $Q3S;
+
+flush tables customer, orders, lineitem;
+eval EXPLAIN $Q3S;
+
+--echo # End of the test case for mdev-503
+
+set optimizer_switch=@save_optimizer_switch;
+
+
+DROP DATABASE dbt3_s001;
+
+use test;
+
+set use_stat_tables=@save_use_stat_tables;
+
+
+SET SESSION STORAGE_ENGINE=DEFAULT;
diff --git a/mysql-test/t/stat_tables_innodb.test b/mysql-test/t/stat_tables_innodb.test
new file mode 100644
index 00000000000..04e81de8f9d
--- /dev/null
+++ b/mysql-test/t/stat_tables_innodb.test
@@ -0,0 +1,12 @@
+--source include/have_innodb.inc
+
+SET SESSION STORAGE_ENGINE='InnoDB';
+
+set @save_optimizer_switch_for_stat_tables_test=@@optimizer_switch;
+set optimizer_switch='extended_keys=on';
+
+--source stat_tables.test
+
+set optimizer_switch=@save_optimizer_switch_for_stat_tables_test;
+
+SET SESSION STORAGE_ENGINE=DEFAULT;
diff --git a/mysql-test/t/stat_tables_par.test b/mysql-test/t/stat_tables_par.test
new file mode 100644
index 00000000000..6c4e1be6e48
--- /dev/null
+++ b/mysql-test/t/stat_tables_par.test
@@ -0,0 +1,278 @@
+--source include/have_stat_tables.inc
+--source include/have_debug_sync.inc
+--source include/not_embedded.inc
+
+set @save_use_stat_tables=@@use_stat_tables;
+
+set use_stat_tables='preferably';
+
+--disable_warnings
+DROP DATABASE IF EXISTS dbt3_s001;
+--enable_warnings
+
+CREATE DATABASE dbt3_s001;
+
+use dbt3_s001;
+
+set @save_optimizer_switch=@@optimizer_switch;
+set optimizer_switch='extended_keys=off';
+
+--disable_query_log
+--disable_result_log
+--disable_warnings
+--source include/dbt3_s001.inc
+delete from mysql.table_stats;
+delete from mysql.column_stats;
+delete from mysql.index_stats;
+ANALYZE TABLE
+customer, lineitem, nation, orders, part, partsupp, region, supplier;
+--enable_warnings
+--enable_result_log
+--enable_query_log
+
+select * from mysql.table_stats;
+select * from mysql.index_stats;
+
+
+#
+# Test for parallel memory allocation for statistical data
+#
+# assumes that the code allocating memory for statistics data starts with
+# these lines:
+#
+# DEBUG_SYNC(thd, "statistics_mem_alloc_start1");
+# DEBUG_SYNC(thd, "statistics_mem_alloc_start2");
+#
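+# DEBUG_SYNC recap: with an action 'point SIGNAL a WAIT_FOR b', a thread
+# hitting the sync point first emits signal a and then blocks until signal b
+# arrives. The mirrored actions below therefore force both connections to be
+# inside the allocation code at the same time.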
+
+let $Q6=
+select sum(l_extendedprice*l_discount) as revenue
+from lineitem
+where l_shipdate >= date '1994-01-01'
+ and l_shipdate < date '1994-01-01' + interval '1' year
+ and l_discount between 0.06 - 0.01 and 0.06 + 0.01
+ and l_quantity < 24;
+
+flush table lineitem;
+set use_stat_tables='never';
+eval $Q6;
+
+connect (con1, localhost, root,,);
+connect (con2, localhost, root,,);
+
+connection con1;
+set debug_sync='statistics_mem_alloc_start1 WAIT_FOR second_thread_started_too';
+set debug_sync='statistics_mem_alloc_start2 SIGNAL first_thread_working';
+use dbt3_s001;
+set use_stat_tables='preferably';
+--send_eval $Q6
+
+connection con2;
+set debug_sync='statistics_mem_alloc_start1 SIGNAL second_thread_started_too';
+set debug_sync='statistics_mem_alloc_start2 WAIT_FOR first_thread_working';
+use dbt3_s001;
+set use_stat_tables='preferably';
+--send_eval $Q6
+
+connection con1;
+--reap
+
+connection con2;
+--reap
+
+connection default;
+set use_stat_tables='preferably';
+disconnect con1;
+disconnect con2;
+set debug_sync='RESET';
+
+#
+# Test for parallel statistics collection
+#
+# assumes that the statistics collection code starts with these lines:
+#
+# DEBUG_SYNC(thd, "statistics_collection_start1");
+# DEBUG_SYNC(thd, "statistics_collection_start2");
+#
+
+select * from mysql.index_stats where table_name='lineitem' order by index_name;
+delete from mysql.index_stats
+ where table_name='lineitem' and
+ index_name in ('i_l_shipdate', 'i_l_receiptdate');
+select * from mysql.index_stats where table_name='lineitem' order by index_name;
+--disable_result_log
+--disable_warnings
+analyze table lineitem persistent for columns() indexes (i_l_shipdate);
+--enable_warnings
+--enable_result_log
+select * from mysql.index_stats where table_name='lineitem' order by index_name;
+delete from mysql.index_stats
+ where table_name='lineitem' and index_name= 'i_l_shipdate';
+select * from mysql.index_stats where table_name='lineitem' order by index_name;
+
+connect (con1, localhost, root,,);
+connect (con2, localhost, root,,);
+
+connection con1;
+set debug_sync='statistics_collection_start1 WAIT_FOR second_thread_started_too';
+set debug_sync='statistics_collection_start2 SIGNAL first_thread_working';
+use dbt3_s001;
+set use_stat_tables='preferably';
+--send analyze table lineitem persistent for columns() indexes (i_l_shipdate)
+
+connection con2;
+set debug_sync='statistics_collection_start1 SIGNAL second_thread_started_too';
+set debug_sync='statistics_collection_start2 WAIT_FOR first_thread_working';
+use dbt3_s001;
+set use_stat_tables='preferably';
+--send analyze table lineitem persistent for columns() indexes (i_l_receiptdate)
+
+connection con1;
+--disable_result_log
+--disable_warnings
+--reap
+--enable_warnings
+--enable_result_log
+
+connection con2;
+--disable_result_log
+--disable_warnings
+--reap
+--enable_warnings
+--enable_result_log
+
+connection default;
+disconnect con1;
+disconnect con2;
+set debug_sync='RESET';
+
+select * from mysql.index_stats where table_name='lineitem' order by index_name;
+
+#
+# Test for parallel statistics collection and update (innodb)
+#
+
+select * from mysql.index_stats where table_name='lineitem'
+ order by index_name, prefix_arity;
+set debug_sync='RESET';
+
+let $innodb_storage_engine= 0;
+if (`SELECT UPPER(@@storage_engine) = 'INNODB'`)
+{
+ let $innodb_storage_engine= 1;
+}
+
+connect (con1, localhost, root,,);
+connect (con2, localhost, root,,);
+
+connection con1;
+set debug_sync='statistics_collection_start SIGNAL parked WAIT_FOR finish';
+use dbt3_s001;
+set use_stat_tables='preferably';
+--send analyze table lineitem persistent for all
+
+connection con2;
+set debug_sync='now WAIT_FOR parked';
+use dbt3_s001;
+set use_stat_tables='never';
+if ($innodb_storage_engine)
+{
+ select * from lineitem where l_orderkey=1 and l_partkey=156;
+ delete from lineitem where l_orderkey=1 and l_partkey=156;
+ select * from lineitem where l_orderkey=1 and l_partkey=156;
+}
+set debug_sync='now SIGNAL finish';
+
+connection con1;
+--disable_result_log
+--disable_warnings
+--reap
+--enable_warnings
+--enable_result_log
+
+connection default;
+disconnect con1;
+disconnect con2;
+set debug_sync='RESET';
+
+select * from mysql.index_stats where table_name='lineitem'
+ order by index_name, prefix_arity;
+
+#
+# Bug mdev-3891: deadlock for ANALYZE and SELECT over mysql.index_stats
+#
+
+set @save_global_use_stat_tables=@@global.use_stat_tables;
+set global use_stat_tables='preferably';
+set debug_sync='RESET';
+
+connect (con1, localhost, root,,);
+connect (con2, localhost, root,,);
+
+connection con1;
+set debug_sync='statistics_update_start SIGNAL parker WAIT_FOR go1 EXECUTE 1';
+set debug_sync='thr_multi_lock_after_thr_lock SIGNAL go2 EXECUTE 2';
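+# EXECUTE n limits a sync action to the first n hits of its sync point, so
+# go2 is emitted on the first two passes through thr_multi_lock.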
+use dbt3_s001;
+--send analyze table lineitem persistent for all
+
+connection con2;
+set debug_sync='open_and_process_table WAIT_FOR parker';
+set debug_sync='statistics_read_start SIGNAL go1 WAIT_FOR go2';
+use dbt3_s001;
+--send select * from mysql.index_stats, lineitem where index_name= 'i_l_shipdate' and l_orderkey=1 and l_partkey=68
+
+connection con1;
+--disable_result_log
+--disable_warnings
+--reap
+--enable_warnings
+--enable_result_log
+
+connection con2;
+--disable_warnings
+--reap
+--enable_warnings
+
+connection default;
+disconnect con1;
+disconnect con2;
+set debug_sync='RESET';
+
+set global use_stat_tables=@save_global_use_stat_tables;
+
+DROP DATABASE dbt3_s001;
+
+use test;
+
+#
+# Bug mdev-4019: crash when executing in parallel ANALYZE and
+# SELECT * FROM information_schema.statistics
+#
+
+set @save_global_use_stat_tables=@@global.use_stat_tables;
+set global use_stat_tables='preferably';
+set debug_sync='RESET';
+
+create table t1 (a int, b int, key(a));
+insert t1 values (1,1),(2,2);
+
+analyze table t1;
+
+SET debug_sync='after_open_table_ignore_flush WAIT_FOR go';
+send select * from information_schema.statistics where table_schema='test';
+
+connect(con1, localhost, root);
+connection con1;
+select * from t1;
+SET DEBUG_SYNC= "now SIGNAL go";
+
+connection default;
+reap;
+
+connection default;
+disconnect con1;
+set debug_sync='RESET';
+
+drop table t1;
+set global use_stat_tables=@save_global_use_stat_tables;
+
+
+set use_stat_tables=@save_use_stat_tables;
diff --git a/mysql-test/t/stat_tables_par_innodb.test b/mysql-test/t/stat_tables_par_innodb.test
new file mode 100644
index 00000000000..fd5833e4aaf
--- /dev/null
+++ b/mysql-test/t/stat_tables_par_innodb.test
@@ -0,0 +1,12 @@
+--source include/have_innodb.inc
+
+SET SESSION STORAGE_ENGINE='InnoDB';
+
+set @save_optimizer_switch_for_stat_tables_test=@@optimizer_switch;
+set optimizer_switch='extended_keys=on';
+
+--source stat_tables_par.test
+
+set optimizer_switch=@save_optimizer_switch_for_stat_tables_test;
+
+SET SESSION STORAGE_ENGINE=DEFAULT;
diff --git a/mysql-test/t/stat_tables_partition.test b/mysql-test/t/stat_tables_partition.test
new file mode 100644
index 00000000000..1316e5cca11
--- /dev/null
+++ b/mysql-test/t/stat_tables_partition.test
@@ -0,0 +1,17 @@
+--source include/have_partition.inc
+
+--echo #
+--echo # Bug mdev-3866: valgrind complain from ANALYZE on a table with BIT field
+--echo #
+
+SET use_stat_tables = 'preferably';
+
+CREATE TABLE t1 (pk int PRIMARY KEY, a bit(1), INDEX idx(a)
+) ENGINE=MyISAM PARTITION BY KEY(pk) PARTITIONS 2;
+INSERT INTO t1 VALUES (1,1),(2,0),(3,0),(4,1);
+
+ANALYZE TABLE t1;
+
+SET use_stat_tables = DEFAULT;
+
+DROP TABLE t1;
diff --git a/mysql-test/t/stat_tables_rbr.test b/mysql-test/t/stat_tables_rbr.test
new file mode 100644
index 00000000000..4336c02c34b
--- /dev/null
+++ b/mysql-test/t/stat_tables_rbr.test
@@ -0,0 +1,31 @@
+--source include/have_binlog_format_row.inc
+--source include/have_innodb.inc
+--source include/have_partition.inc
+
+--echo #
+--echo # Bug mdev-463: assertion failure when running ANALYZE with RBR on
+--echo #
+
+SET GLOBAL use_stat_tables = PREFERABLY;
+
+--connect (con1,localhost,root,,)
+CREATE TABLE t1 (i INT) ENGINE=InnoDB;
+ANALYZE TABLE t1;
+
+# Cleanup
+DROP TABLE t1;
+SET GLOBAL use_stat_tables = DEFAULT;
+--disconnect con1
+
+--connection default
+
+SET use_stat_tables = PREFERABLY;
+
+CREATE TABLE t1 ( a INT ) ENGINE=MyISAM PARTITION BY HASH(a) PARTITIONS 2;
+ALTER TABLE t1 ANALYZE PARTITION p1;
+--replace_regex /\/\* xid=.* \*\//\/* XID *\// /Server ver: .*, Binlog ver: .*/Server ver: #, Binlog ver: #/ /table_id: [0-9]+/table_id: #/
+SHOW BINLOG EVENTS;
+
+SET use_stat_tables = DEFAULT;
+
+DROP TABLE t1;
diff --git a/mysql-test/t/stat_tables_repl.test b/mysql-test/t/stat_tables_repl.test
new file mode 100644
index 00000000000..999c49d37cf
--- /dev/null
+++ b/mysql-test/t/stat_tables_repl.test
@@ -0,0 +1,58 @@
+--source include/have_stat_tables.inc
+--source include/master-slave.inc
+--source include/have_binlog_format_row.inc
+
+--echo #
+--echo # Bug mdev-485: unexpected failure with replication of DROP/ALTER table
+--echo # when RBR is on
+--echo #
+
+CREATE TABLE t1 ( a int, b int ) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (2,20), (1,20), (3,30);
+ANALYZE TABLE t1;
+DROP TABLE t1;
+
+--sync_slave_with_master
+--connection master
+
+CREATE TABLE t1 ( a int, b int, INDEX idx1(b) ) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (2,20), (1,20), (3,30);
+ANALYZE TABLE t1;
+DROP INDEX idx1 ON t1;
+
+--sync_slave_with_master
+--connection master
+
+DROP TABLE t1;
+
+CREATE TABLE t1 ( a int, b int, INDEX idx1(b) ) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (2,20), (1,20), (3,30);
+ANALYZE TABLE t1;
+ALTER TABLE t1 DROP COLUMN b;
+
+--sync_slave_with_master
+--connection master
+
+DROP TABLE t1;
+
+CREATE TABLE t1 ( a int, b int, INDEX idx1(b) ) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (2,20), (1,20), (3,30);
+ANALYZE TABLE t1;
+ALTER TABLE t1 RENAME to s;
+
+--sync_slave_with_master
+--connection master
+
+DROP TABLE s;
+
+CREATE TABLE t1 ( a int, b int, INDEX idx1(b) ) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (2,20), (1,20), (3,30);
+ANALYZE TABLE t1;
+ALTER TABLE t1 CHANGE COLUMN b c int ;
+
+--sync_slave_with_master
+--connection master
+
+DROP TABLE t1;
+
+--source include/rpl_end.inc
diff --git a/mysql-test/t/statistics.test b/mysql-test/t/statistics.test
new file mode 100644
index 00000000000..4f2d0510c11
--- /dev/null
+++ b/mysql-test/t/statistics.test
@@ -0,0 +1,564 @@
+--source include/have_stat_tables.inc
+--source include/have_innodb.inc
+--disable_warnings
+drop table if exists t1,t2;
+--enable_warnings
+
+set @save_use_stat_tables=@@use_stat_tables;
+
+DELETE FROM mysql.table_stats;
+DELETE FROM mysql.column_stats;
+DELETE FROM mysql.index_stats;
+
+set use_stat_tables='preferably';
+
+CREATE TABLE t1 (
+ a int NOT NULL PRIMARY KEY,
+ b varchar(32),
+ c char(16),
+ d date,
+ e double,
+ f bit(3),
+ INDEX idx1 (b, e),
+ INDEX idx2 (c, d),
+ INDEX idx3 (d),
+ INDEX idx4 (e, b, d)
+) ENGINE= MYISAM;
+
+INSERT INTO t1 VALUES
+ (0, NULL, NULL, NULL, NULL, NULL),
+ (7, 'xxxxxxxxxxxxxxxxxxxxxxxxxx', 'dddddddd', '1990-05-15', 0.1, b'100'),
+ (17, 'vvvvvvvvvvvvv', 'aaaa', '1989-03-12', 0.01, b'101'),
+ (1, 'vvvvvvvvvvvvv', NULL, '1989-03-12', 0.01, b'100'),
+ (12, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww', 'dddddddd', '1999-07-23', 0.112, b'001'),
+ (23, 'vvvvvvvvvvvvv', 'dddddddd', '1999-07-23', 0.1, b'100'),
+ (8, 'vvvvvvvvvvvvv', 'aaaa', '1999-07-23', 0.1, b'100'),
+ (22, 'xxxxxxxxxxxxxxxxxxxxxxxxxx', 'aaaa', '1989-03-12', 0.112, b'001'),
+ (31, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww', 'aaaa', '1999-07-23', 0.01, b'001'),
+ (10, NULL, 'aaaa', NULL, 0.01, b'010'),
+ (5, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww', 'dddddddd', '1999-07-23', 0.1, b'100'),
+ (15, 'vvvvvvvvvvvvv', 'ccccccccc', '1990-05-15', 0.1, b'010'),
+ (30, NULL, 'bbbbbb', NULL, NULL, b'100'),
+ (38, 'zzzzzzzzzzzzzzzzzz', 'bbbbbb', NULL, NULL, NULL),
+ (18, 'zzzzzzzzzzzzzzzzzz', 'ccccccccc', '1990-05-15', 0.01, b'010'),
+ (9, 'yyy', 'bbbbbb', '1998-08-28', 0.01, NULL),
+ (29, 'vvvvvvvvvvvvv', 'dddddddd', '1999-07-23', 0.012, b'010'),
+ (3, 'yyy', 'dddddddd', '1990-05-15', 0.112, b'010'),
+ (39, 'zzzzzzzzzzzzzzzzzz', 'bbbbbb', NULL, 0.01, b'100'),
+ (14, 'xxxxxxxxxxxxxxxxxxxxxxxxxx', 'ccccccccc', '1990-05-15', 0.1, b'100'),
+ (40, 'zzzzzzzzzzzzzzzzzz', 'bbbbbb', '1989-03-12', NULL, NULL),
+ (44, NULL, 'aaaa', '1989-03-12', NULL, b'010'),
+ (19, 'vvvvvvvvvvvvv', 'ccccccccc', '1990-05-15', 0.012, b'011'),
+ (21, 'zzzzzzzzzzzzzzzzzz', 'dddddddd', '1989-03-12', 0.112, b'100'),
+ (45, NULL, NULL, '1989-03-12', NULL, b'011'),
+ (2, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww', 'ccccccccc', '1990-05-15', 0.1, b'001'),
+ (35, 'yyy', 'aaaa', '1990-05-15', 0.05, b'011'),
+ (4, 'vvvvvvvvvvvvv', 'dddddddd', '1999-07-23', 0.01, b'101'),
+ (47, NULL, 'aaaa', '1990-05-15', 0.05, b'010'),
+ (42, NULL, 'ccccccccc', '1989-03-12', 0.01, b'010'),
+ (32, NULL, 'bbbbbb', '1990-05-15', 0.01, b'011'),
+ (49, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww' , 'aaaa', '1990-05-15', NULL, NULL),
+ (43, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww' , 'bbbbbb', '1990-05-15', NULL, b'100'),
+ (37, 'yyy', NULL, '1989-03-12', 0.05, b'011'),
+ (41, 'xxxxxxxxxxxxxxxxxxxxxxxxxx', 'ccccccccc', '1990-05-15', 0.05, NULL),
+ (34, 'yyy', NULL, NULL, NULL, NULL),
+ (33, 'zzzzzzzzzzzzzzzzzz', 'dddddddd', '1989-03-12', 0.05, b'011'),
+ (24, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww', 'dddddddd', '1990-05-15', 0.01, b'101'),
+ (11, 'yyy', 'ccccccccc', '1999-07-23', 0.1, NULL),
+ (25, 'zzzzzzzzzzzzzzzzzz', 'bbb', '1989-03-12', 0.01, b'101');
+
+ANALYZE TABLE t1;
+
+SELECT * FROM mysql.table_stats;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+SELECT COUNT(*) FROM t1;
+
+SELECT * FROM mysql.column_stats
+ WHERE db_name='test' AND table_name='t1' AND column_name='a';
+SELECT MIN(t1.a), MAX(t1.a),
+ (SELECT COUNT(*) FROM t1 WHERE t1.b IS NULL) /
+ (SELECT COUNT(*) FROM t1) AS "NULLS_RATIO(t1.a)",
+ (SELECT COUNT(t1.a) FROM t1) /
+ (SELECT COUNT(DISTINCT t1.a) FROM t1) AS "AVG_FREQUENCY(t1.a)"
+FROM t1;
+
+SELECT * FROM mysql.column_stats
+ WHERE db_name='test' AND table_name='t1' AND column_name='b';
+SELECT MIN(t1.b), MAX(t1.b),
+ (SELECT COUNT(*) FROM t1 WHERE t1.b IS NULL) /
+ (SELECT COUNT(*) FROM t1) AS "NULLS_RATIO(t1.b)",
+ (SELECT COUNT(t1.b) FROM t1) /
+ (SELECT COUNT(DISTINCT t1.b) FROM t1) AS "AVG_FREQUENCY(t1.b)"
+FROM t1;
+
+SELECT * FROM mysql.column_stats
+ WHERE db_name='test' AND table_name='t1' AND column_name='c';
+SELECT MIN(t1.c), MAX(t1.c),
+ (SELECT COUNT(*) FROM t1 WHERE t1.c IS NULL) /
+ (SELECT COUNT(*) FROM t1) AS "NULLS_RATIO(t1.c)",
+ (SELECT COUNT(t1.c) FROM t1) /
+ (SELECT COUNT(DISTINCT t1.c) FROM t1) AS "AVG_FREQUENCY(t1.c)"
+FROM t1;
+
+SELECT * FROM mysql.column_stats
+ WHERE db_name='test' AND table_name='t1' AND column_name='d';
+SELECT MIN(t1.d), MAX(t1.d),
+ (SELECT COUNT(*) FROM t1 WHERE t1.d IS NULL) /
+ (SELECT COUNT(*) FROM t1) AS "NULLS_RATIO(t1.d)",
+ (SELECT COUNT(t1.d) FROM t1) /
+ (SELECT COUNT(DISTINCT t1.d) FROM t1) AS "AVG_FREQUENCY(t1.d)"
+FROM t1;
+
+SELECT * FROM mysql.column_stats
+ WHERE db_name='test' AND table_name='t1' AND column_name='e';
+SELECT MIN(t1.e), MAX(t1.e),
+ (SELECT COUNT(*) FROM t1 WHERE t1.e IS NULL) /
+ (SELECT COUNT(*) FROM t1) AS "NULLS_RATIO(t1.e)",
+ (SELECT COUNT(t1.e) FROM t1) /
+ (SELECT COUNT(DISTINCT t1.e) FROM t1) AS "AVG_FREQUENCY(t1.e)"
+FROM t1;
+
+SELECT * FROM mysql.index_stats
+ WHERE db_name='test' AND table_name='t1' AND index_name='idx1';
+SELECT
+ (SELECT COUNT(*) FROM t1 WHERE t1.b IS NOT NULL) /
+ (SELECT COUNT(DISTINCT t1.b) FROM t1 WHERE t1.b IS NOT NULL)
+ AS 'ARITY 1',
+ (SELECT COUNT(*) FROM t1 WHERE t1.b IS NOT NULL AND t1.e IS NOT NULL) /
+ (SELECT COUNT(DISTINCT t1.b, t1.e) FROM t1
+ WHERE t1.b IS NOT NULL AND t1.e IS NOT NULL)
+ AS 'ARITY 2';
+
+SELECT * FROM mysql.index_stats
+ WHERE db_name='test' AND table_name='t1' AND index_name='idx2';
+SELECT
+ (SELECT COUNT(*) FROM t1 WHERE t1.c IS NOT NULL) /
+ (SELECT COUNT(DISTINCT t1.c) FROM t1 WHERE t1.c IS NOT NULL)
+ AS 'ARITY 1',
+ (SELECT COUNT(*) FROM t1 WHERE t1.c IS NOT NULL AND t1.d IS NOT NULL) /
+ (SELECT COUNT(DISTINCT t1.c, t1.d) FROM t1
+ WHERE t1.c IS NOT NULL AND t1.d IS NOT NULL)
+ AS 'ARITY 2';
+
+SELECT * FROM mysql.index_stats
+ WHERE db_name='test' AND table_name='t1' AND index_name='idx3';
+SELECT
+ (SELECT COUNT(*) FROM t1 WHERE t1.d IS NOT NULL) /
+ (SELECT COUNT(DISTINCT t1.d) FROM t1 WHERE t1.d IS NOT NULL)
+ AS 'ARITY 1';
+
+SELECT * FROM mysql.index_stats
+ WHERE db_name='test' AND table_name='t1' AND index_name='idx4';
+SELECT
+ (SELECT COUNT(*) FROM t1 WHERE t1.e IS NOT NULL) /
+ (SELECT COUNT(DISTINCT t1.e) FROM t1 WHERE t1.e IS NOT NULL)
+ AS 'ARITY 1',
+ (SELECT COUNT(*) FROM t1 WHERE t1.e IS NOT NULL AND t1.b IS NOT NULL) /
+ (SELECT COUNT(DISTINCT t1.e, t1.b) FROM t1
+ WHERE t1.e IS NOT NULL AND t1.b IS NOT NULL)
+ AS 'ARITY 2',
+ (SELECT COUNT(*) FROM t1
+ WHERE t1.e IS NOT NULL AND t1.b IS NOT NULL AND t1.d IS NOT NULL) /
+ (SELECT COUNT(DISTINCT t1.e, t1.b, t1.d) FROM t1
+ WHERE t1.e IS NOT NULL AND t1.b IS NOT NULL AND t1.d IS NOT NULL)
+ AS 'ARITY 3';
+
+CREATE TABLE t3 (
+ a int NOT NULL PRIMARY KEY,
+ b varchar(32),
+ c char(16),
+ INDEX idx (c)
+) ENGINE=MYISAM;
+
+INSERT INTO t3 VALUES
+ (0, NULL, NULL),
+ (7, 'xxxxxxxxxxxxxxxxxxxxxxxxxx', 'dddddddd'),
+ (17, 'vvvvvvvvvvvvv', 'aaaa'),
+ (1, 'vvvvvvvvvvvvv', NULL),
+ (12, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww', 'dddddddd'),
+ (23, 'vvvvvvvvvvvvv', 'dddddddd'),
+ (8, 'vvvvvvvvvvvvv', 'aaaa'),
+ (22, 'xxxxxxxxxxxxxxxxxxxxxxxxxx', 'aaaa'),
+ (31, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww', 'aaaa'),
+ (10, NULL, 'aaaa'),
+ (5, 'wwwwwwwwwwwwwwwwwwwwwwwwwwww', 'dddddddd'),
+ (15, 'vvvvvvvvvvvvv', 'ccccccccc'),
+ (30, NULL, 'bbbbbb'),
+ (38, 'zzzzzzzzzzzzzzzzzz', 'bbbbbb'),
+ (18, 'zzzzzzzzzzzzzzzzzz', 'ccccccccc'),
+ (9, 'yyy', 'bbbbbb'),
+ (29, 'vvvvvvvvvvvvv', 'dddddddd');
+
+ANALYZE TABLE t3;
+
+SELECT * FROM mysql.table_stats;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+ALTER TABLE t1 RENAME TO s1;
+SELECT * FROM mysql.table_stats;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+RENAME TABLE s1 TO t1;
+SELECT * FROM mysql.table_stats;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+DROP TABLE t3;
+SELECT * FROM mysql.table_stats;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+
+CREATE TEMPORARY TABLE t0 (
+ a int NOT NULL PRIMARY KEY,
+ b varchar(32)
+);
+INSERT INTO t0 SELECT a,b FROM t1;
+
+ALTER TABLE t1 CHANGE COLUMN b x varchar(32),
+ CHANGE COLUMN e y double;
+SHOW CREATE TABLE t1;
+SELECT * FROM mysql.column_stats;
+
+ALTER TABLE t1 CHANGE COLUMN x b varchar(32),
+ CHANGE COLUMN y e double;
+SHOW CREATE TABLE t1;
+SELECT * FROM mysql.column_stats;
+
+ALTER TABLE t1 RENAME TO s1, CHANGE COLUMN b x varchar(32);
+SHOW CREATE TABLE s1;
+SELECT * FROM mysql.table_stats;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+ALTER TABLE s1 RENAME TO t1, CHANGE COLUMN x b varchar(32);
+SHOW CREATE TABLE t1;
+SELECT * FROM mysql.table_stats;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+ALTER TABLE t1 CHANGE COLUMN b x varchar(30);
+SHOW CREATE TABLE t1;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+ALTER TABLE t1 CHANGE COLUMN x b varchar(32);
+SHOW CREATE TABLE t1;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS(b) INDEXES(idx1, idx4);
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+eval
+SELECT * INTO OUTFILE '$MYSQLTEST_VARDIR/tmp/save_column_stats'
+ FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' LINES TERMINATED BY '\n'
+ FROM mysql.column_stats WHERE column_name='b';
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+eval
+SELECT * INTO OUTFILE '$MYSQLTEST_VARDIR/tmp/save_index_stats'
+ FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' LINES TERMINATED BY '\n'
+ FROM mysql.index_stats WHERE index_name IN ('idx1', 'idx4');
+
+ALTER TABLE t1 CHANGE COLUMN b x varchar(30);
+SHOW CREATE TABLE t1;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+ALTER TABLE t1 CHANGE COLUMN x b varchar(32);
+SHOW CREATE TABLE t1;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+eval
+LOAD DATA INFILE '$MYSQLTEST_VARDIR/tmp/save_column_stats'
+ INTO TABLE mysql.column_stats
+ FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' LINES TERMINATED BY '\n';
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+eval
+LOAD DATA INFILE '$MYSQLTEST_VARDIR/tmp/save_index_stats'
+ INTO TABLE mysql.index_stats
+ FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' LINES TERMINATED BY '\n';
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+remove_file $MYSQLTEST_VARDIR/tmp/save_column_stats;
+remove_file $MYSQLTEST_VARDIR/tmp/save_index_stats;
+
+
+ALTER TABLE t1 DROP COLUMN b;
+SHOW CREATE TABLE t1;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+DROP INDEX idx2 ON t1;
+SHOW CREATE TABLE t1;
+SELECT * FROM mysql.index_stats;
+
+DROP INDEX idx1 ON t1;
+DROP INDEX idx4 ON t1;
+SHOW CREATE TABLE t1;
+
+ALTER TABLE t1 ADD COLUMN b varchar(32);
+CREATE INDEX idx1 ON t1(b, e);
+CREATE INDEX idx2 ON t1(c, d);
+CREATE INDEX idx4 ON t1(e, b, d);
+SHOW CREATE TABLE t1;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS(b) INDEXES(idx1, idx2, idx4);
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+UPDATE t1 SET b=(SELECT b FROM t0 WHERE t0.a= t1.a);
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS(b) INDEXES(idx1, idx2, idx4);
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+ALTER TABLE t1 DROP COLUMN b,
+ DROP INDEX idx1, DROP INDEX idx2, DROP INDEX idx4;
+SHOW CREATE TABLE t1;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+ALTER TABLE t1 ADD COLUMN b varchar(32);
+ALTER TABLE t1
+ ADD INDEX idx1 (b, e), ADD INDEX idx2 (c, d), ADD INDEX idx4 (e, b, d);
+UPDATE t1 SET b=(SELECT b FROM t0 WHERE t0.a= t1.a);
+SHOW CREATE TABLE t1;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS(b) INDEXES(idx1, idx2, idx4);
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+DELETE FROM mysql.table_stats;
+DELETE FROM mysql.column_stats;
+DELETE FROM mysql.index_stats;
+
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS() INDEXES();
+SELECT * FROM mysql.table_stats;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS(c,e,b) INDEXES(idx2,idx4);
+SELECT * FROM mysql.table_stats;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+DELETE FROM mysql.index_stats WHERE table_name='t1' AND index_name='primary';
+SELECT * FROM mysql.index_stats;
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS() INDEXES(primary);
+SELECT * FROM mysql.index_stats;
+
+DELETE FROM mysql.table_stats;
+DELETE FROM mysql.column_stats;
+DELETE FROM mysql.index_stats;
+
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS ALL INDEXES ALL;
+
+SELECT * FROM mysql.table_stats;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+CREATE TABLE t2 LIKE t1;
+ALTER TABLE t2 ENGINE=InnoDB;
+INSERT INTO t2 SELECT * FROM t1;
+
+set optimizer_switch='extended_keys=off';
+
+ANALYZE TABLE t2;
+
+SELECT * FROM mysql.table_stats;
+SELECT * FROM mysql.column_stats ORDER BY column_name;
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+
+DELETE FROM mysql.table_stats;
+DELETE FROM mysql.column_stats;
+DELETE FROM mysql.index_stats;
+
+set optimizer_switch='extended_keys=on';
+
+ANALYZE TABLE t2;
+
+SELECT * FROM mysql.table_stats;
+SELECT * FROM mysql.column_stats ORDER BY column_name;
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+
+ALTER TABLE t2 DROP PRIMARY KEY, DROP INDEX idx1;
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+
+UPDATE t2 SET b=0 WHERE b IS NULL;
+ALTER TABLE t2 ADD PRIMARY KEY (a,b);
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+
+ANALYZE TABLE t2 PERSISTENT FOR COLUMNS() INDEXES ALL;
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+
+ALTER TABLE t2 CHANGE COLUMN b b varchar(30);
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+
+ANALYZE TABLE t2 PERSISTENT FOR COLUMNS ALL INDEXES ALL;
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+
+ALTER TABLE t2 CHANGE COLUMN b b varchar(32);
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+
+ANALYZE TABLE t2 PERSISTENT FOR COLUMNS ALL INDEXES ALL;
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+
+ALTER TABLE t2 DROP COLUMN b;
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+
+ANALYZE TABLE t2 PERSISTENT FOR COLUMNS() INDEXES ALL;
+SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
+
+set optimizer_switch='extended_keys=off';
+
+ALTER TABLE t1
+ DROP INDEX idx1,
+ DROP INDEX idx4;
+ALTER TABLE t1
+ MODIFY COLUMN b text,
+ ADD INDEX idx1 (b(4), e),
+ ADD INDEX idx4 (e, b(4), d);
+
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+ANALYZE TABLE t1;
+
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+DELETE FROM mysql.table_stats;
+DELETE FROM mysql.column_stats;
+DELETE FROM mysql.index_stats;
+
+ANALYZE TABLE mysql.column_stats PERSISTENT FOR ALL;
+
+ANALYZE TABLE mysql.column_stats;
+
+SELECT * FROM mysql.table_stats;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+set use_stat_tables='never';
+
+ANALYZE TABLE t1 PERSISTENT FOR ALL;
+
+SELECT * FROM mysql.table_stats;
+SELECT * FROM mysql.column_stats;
+SELECT * FROM mysql.index_stats;
+
+
+DELETE FROM mysql.table_stats;
+DELETE FROM mysql.column_stats;
+DELETE FROM mysql.index_stats;
+
+DROP TABLE t1,t2;
+
+set names utf8;
+
+CREATE DATABASE world;
+
+use world;
+
+--source include/world_schema_utf8.inc
+
+--disable_query_log
+--disable_result_log
+--disable_warnings
+--source include/world.inc
+--enable_warnings
+--enable_result_log
+--enable_query_log
+
+set use_stat_tables='preferably';
+
+--disable_result_log
+ANALYZE TABLE Country, City, CountryLanguage;
+--enable_result_log
+
+SELECT UPPER(db_name), UPPER(table_name), cardinality
+ FROM mysql.table_stats;
+SELECT UPPER(db_name), UPPER(table_name),
+ column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency
+ FROM mysql.column_stats;
+SELECT UPPER(db_name), UPPER(table_name),
+ index_name, prefix_arity, avg_frequency
+ FROM mysql.index_stats;
+
+use test;
+
+set use_stat_tables='never';
+
+CREATE DATABASE world_innodb;
+
+use world_innodb;
+
+--source include/world_schema_utf8.inc
+
+ALTER TABLE Country ENGINE=InnoDB;
+ALTER TABLE City ENGINE=InnoDB;
+ALTER TABLE CountryLanguage ENGINE=InnoDB;
+
+--disable_query_log
+--disable_result_log
+--disable_warnings
+--source include/world.inc
+--enable_warnings
+--enable_result_log
+--enable_query_log
+
+set use_stat_tables='preferably';
+
+--disable_result_log
+ANALYZE TABLE Country, City, CountryLanguage;
+--enable_result_log
+
+SELECT UPPER(db_name), UPPER(table_name), cardinality
+ FROM mysql.table_stats;
+SELECT UPPER(db_name), UPPER(table_name),
+ column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency
+ FROM mysql.column_stats;
+SELECT UPPER(db_name), UPPER(table_name),
+ index_name, prefix_arity, avg_frequency
+ FROM mysql.index_stats;
+
+use test;
+
+DROP DATABASE world;
+SELECT UPPER(db_name), UPPER(table_name), cardinality
+ FROM mysql.table_stats;
+SELECT UPPER(db_name), UPPER(table_name),
+ column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency
+ FROM mysql.column_stats;
+SELECT UPPER(db_name), UPPER(table_name),
+ index_name, prefix_arity, avg_frequency
+ FROM mysql.index_stats;
+
+DROP DATABASE world_innodb;
+SELECT UPPER(db_name), UPPER(table_name), cardinality
+ FROM mysql.table_stats;
+SELECT UPPER(db_name), UPPER(table_name),
+ column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency
+ FROM mysql.column_stats;
+SELECT UPPER(db_name), UPPER(table_name),
+ index_name, prefix_arity, avg_frequency
+ FROM mysql.index_stats;
+
+DELETE FROM mysql.table_stats;
+DELETE FROM mysql.column_stats;
+DELETE FROM mysql.index_stats;
+
+set use_stat_tables=@save_use_stat_tables;
+
+ 
\ No newline at end of file
diff --git a/mysql-test/t/str_to_datetime_457.test b/mysql-test/t/str_to_datetime_457.test
new file mode 100644
index 00000000000..dd25f98ebdd
--- /dev/null
+++ b/mysql-test/t/str_to_datetime_457.test
@@ -0,0 +1,26 @@
+#
+# MDEV-457 Inconsistent data truncation on datetime values with fractional seconds represented as strings with no delimiters
+# (and other problems with str_to_datetime)
+#
+
+# first was ok, second was not
+select cast('01:02:03 ' as time), cast('01:02:03 ' as time);
+# first two were ok, third was not
+select cast('2002-011-012' as date), cast('2002.11.12' as date), cast('2002.011.012' as date);
+# only two microsecond digits were ok, third was truncated with a warning
+select cast('2012103123595912' as datetime(6)), cast('20121031235959123' as datetime(6));
+# zero string date was considered 'out of range'. Must be either ok or invalid format
+select cast(0 as date), cast('0000-00-00' as date), cast('0' as date);
+# first was ok, second was not
+select extract(hour from '100000:02:03'), extract(hour from '100000:02:03 ');
+
+--echo #
+--echo # backward compatibility craziness
+--echo #
+select cast('12:00:00.12.34.56' as time); # was 12:00:00
+select cast('12:00:00 12.34.56' as time); # was 12:34:56
+select cast('12:00:00-12.34.56' as time); # was 12:00:00
+select cast('12:00:00.12.34.56' as datetime);
+select cast('12:00:00-12.34.56' as datetime);
+select cast('12:00:00 12.34.56' as datetime);
+select cast('12:00:00.123456' as time);
diff --git a/mysql-test/t/subselect_innodb.test b/mysql-test/t/subselect_innodb.test
index 83c36b16163..fc07a088a15 100644
--- a/mysql-test/t/subselect_innodb.test
+++ b/mysql-test/t/subselect_innodb.test
@@ -244,6 +244,54 @@ drop procedure p1;
drop tables t1,t2,t3;
--echo #
+--echo # Bug #58756
+--echo # Crash in heap_rrnd on query with HAVING ... IN (subquery) + LIMIT
+--echo #
+
+CREATE TABLE t1 (
+ col_time_key time DEFAULT NULL,
+ col_datetime_key datetime DEFAULT NULL,
+ col_varchar_nokey varchar(1) DEFAULT NULL,
+ KEY col_time_key (col_time_key),
+ KEY col_datetime_key (col_datetime_key)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+INSERT INTO t1 VALUES ('17:53:30','2005-11-10 12:40:29','h');
+INSERT INTO t1 VALUES ('11:35:49','2009-04-25 00:00:00','b');
+INSERT INTO t1 VALUES (NULL,'2002-11-27 00:00:00','s');
+INSERT INTO t1 VALUES ('06:01:40','2004-01-26 20:32:32','e');
+INSERT INTO t1 VALUES ('05:45:11','2007-10-26 11:41:40','j');
+INSERT INTO t1 VALUES ('00:00:00','2005-10-07 00:00:00','e');
+INSERT INTO t1 VALUES ('00:00:00','2000-07-15 05:00:34','f');
+INSERT INTO t1 VALUES ('06:11:01','2000-04-03 16:33:32','v');
+INSERT INTO t1 VALUES ('13:02:46',NULL,'x');
+INSERT INTO t1 VALUES ('21:44:25','2001-04-25 01:26:12','m');
+INSERT INTO t1 VALUES ('22:43:58','2000-12-27 00:00:00','c');
+
+CREATE TABLE t2 (
+ col_time_key time DEFAULT NULL,
+ col_datetime_key datetime DEFAULT NULL,
+ col_varchar_nokey varchar(1) DEFAULT NULL,
+ KEY col_time_key (col_time_key),
+ KEY col_datetime_key (col_datetime_key)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+INSERT INTO t2 VALUES ('11:28:45','2004-10-11 18:13:16','w');
+
+SELECT col_time_key, col_datetime_key
+FROM
+( SELECT * FROM t1 ) AS table1
+HAVING ( 'r' , 'e' ) IN
+ ( SELECT col_varchar_nokey , col_varchar_nokey FROM t2 )
+ORDER BY col_datetime_key
+LIMIT 10;
+
+DROP TABLE t1;
+DROP TABLE t2;
+
+--echo # End of Bug #58756
+
+--echo #
--echo # Bug#60085 crash in Item::save_in_field() with time data type
--echo #
diff --git a/mysql-test/t/system_mysql_db_fix40123.test b/mysql-test/t/system_mysql_db_fix40123.test
index 5c82f0022b8..15e182f778f 100644
--- a/mysql-test/t/system_mysql_db_fix40123.test
+++ b/mysql-test/t/system_mysql_db_fix40123.test
@@ -59,6 +59,11 @@ CREATE TABLE time_zone_transition_type ( Time_zone_id int unsigned NOT NULL,
CREATE TABLE time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones';
+CREATE TABLE table_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, cardinality bigint(21) unsigned DEFAULT NULL, PRIMARY KEY (db_name,table_name) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Tables';
+
+CREATE TABLE column_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, column_name varchar(64) NOT NULL, min_value varchar(255) DEFAULT NULL, max_value varchar(255) DEFAULT NULL, nulls_ratio double DEFAULT NULL, avg_length double DEFAULT NULL, avg_frequency double DEFAULT NULL, PRIMARY KEY (db_name,table_name,column_name) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Columns';
+
+CREATE TABLE index_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, index_name varchar(64) NOT NULL, prefix_arity int(11) unsigned NOT NULL, avg_frequency double DEFAULT NULL, PRIMARY KEY (db_name,table_name,index_name,prefix_arity) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Indexes';
# Run the mysql_fix_privilege_tables.sql using "mysql --force"
--exec $MYSQL --force test < $MYSQL_FIX_PRIVILEGE_TABLES
@@ -72,7 +77,7 @@ CREATE TABLE time_zone_leap_second ( Transition_time bigint signed NOT NULL,
-- disable_query_log
# Drop all tables created by this test
-DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, ndb_binlog_index, proxies_priv;
+DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, ndb_binlog_index, proxies_priv, table_stats, column_stats, index_stats;
-- enable_query_log
diff --git a/mysql-test/t/system_mysql_db_fix50030.test b/mysql-test/t/system_mysql_db_fix50030.test
index e8cfe6a6cf3..bf7a6ec2d93 100644
--- a/mysql-test/t/system_mysql_db_fix50030.test
+++ b/mysql-test/t/system_mysql_db_fix50030.test
@@ -66,6 +66,12 @@ CREATE TABLE servers ( Server_name char(64) NOT NULL DEFAULT '', Host char(64) N
INSERT INTO servers VALUES ('test','localhost','test','root','', 0,'','mysql','root');
+CREATE TABLE table_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, cardinality bigint(21) unsigned DEFAULT NULL, PRIMARY KEY (db_name,table_name) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Tables';
+
+CREATE TABLE column_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, column_name varchar(64) NOT NULL, min_value varchar(255) DEFAULT NULL, max_value varchar(255) DEFAULT NULL, nulls_ratio double DEFAULT NULL, avg_length double DEFAULT NULL, avg_frequency double DEFAULT NULL, PRIMARY KEY (db_name,table_name,column_name) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Columns';
+
+CREATE TABLE index_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, index_name varchar(64) NOT NULL, prefix_arity int(11) unsigned NOT NULL, avg_frequency double DEFAULT NULL, PRIMARY KEY (db_name,table_name,index_name,prefix_arity) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Indexes';
+
# Run the mysql_fix_privilege_tables.sql using "mysql --force"
--exec $MYSQL --force test < $MYSQL_FIX_PRIVILEGE_TABLES
@@ -78,7 +84,7 @@ INSERT INTO servers VALUES ('test','localhost','test','root','', 0,'','mysql','r
-- disable_query_log
# Drop all tables created by this test
-DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, ndb_binlog_index, proxies_priv;
+DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, ndb_binlog_index, proxies_priv, table_stats, column_stats, index_stats;
-- enable_query_log
diff --git a/mysql-test/t/system_mysql_db_fix50117.test b/mysql-test/t/system_mysql_db_fix50117.test
index 69ad68faaa8..8090b43a850 100644
--- a/mysql-test/t/system_mysql_db_fix50117.test
+++ b/mysql-test/t/system_mysql_db_fix50117.test
@@ -85,6 +85,12 @@ CREATE TABLE IF NOT EXISTS event ( db char(64) CHARACTER SET utf8 COLLATE utf8_b
CREATE TABLE IF NOT EXISTS ndb_binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM;
+CREATE TABLE table_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, cardinality bigint(21) unsigned DEFAULT NULL, PRIMARY KEY (db_name,table_name) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Tables';
+
+CREATE TABLE column_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, column_name varchar(64) NOT NULL, min_value varchar(255) DEFAULT NULL, max_value varchar(255) DEFAULT NULL, nulls_ratio double DEFAULT NULL, avg_length double DEFAULT NULL, avg_frequency double DEFAULT NULL, PRIMARY KEY (db_name,table_name,column_name) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Columns';
+
+CREATE TABLE index_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, index_name varchar(64) NOT NULL, prefix_arity int(11) unsigned NOT NULL, avg_frequency double DEFAULT NULL, PRIMARY KEY (db_name,table_name,index_name,prefix_arity) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Indexes';
+
# Run the mysql_fix_privilege_tables.sql using "mysql --force"
--exec $MYSQL --force test < $MYSQL_FIX_PRIVILEGE_TABLES
@@ -97,7 +103,7 @@ CREATE TABLE IF NOT EXISTS ndb_binlog_index (Position BIGINT UNSIGNED NOT NULL,
-- disable_query_log
# Drop all tables created by this test
-DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, ndb_binlog_index, proxies_priv;
+DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, ndb_binlog_index, proxies_priv, table_stats, column_stats, index_stats;
-- enable_query_log
diff --git a/mysql-test/t/type_timestamp.test b/mysql-test/t/type_timestamp.test
index 575c30431b6..1c17743e7f1 100644
--- a/mysql-test/t/type_timestamp.test
+++ b/mysql-test/t/type_timestamp.test
@@ -86,17 +86,16 @@ drop table t1;
# Test for TIMESTAMP column with default now() and on update now() clauses
#
-# These statements should fail.
---error 1293
create table t1 (t1 timestamp, t2 timestamp default now());
---error 1293
+drop table t1;
create table t1 (t1 timestamp, t2 timestamp on update now());
---error 1293
+drop table t1;
create table t1 (t1 timestamp, t2 timestamp default now() on update now());
---error 1293
+drop table t1;
create table t1 (t1 timestamp default now(), t2 timestamp on update now());
---error 1293
+drop table t1;
create table t1 (t1 timestamp on update now(), t2 timestamp default now() on update now());
+drop table t1;
# Let us test TIMESTAMP auto-update behaviour
# Also we will test behaviour of TIMESTAMP field in SHOW CREATE TABLE and
diff --git a/mysql-test/t/type_timestamp_hires.test b/mysql-test/t/type_timestamp_hires.test
index 8e1f8586956..17a2c3e1f1f 100644
--- a/mysql-test/t/type_timestamp_hires.test
+++ b/mysql-test/t/type_timestamp_hires.test
@@ -14,3 +14,18 @@ insert t1 values ();
select * from t1;
drop table t1;
+#
+# MDEV-438 Microseconds: Precision is ignored in CURRENT_TIMESTAMP(N) when it is given as a default column value
+#
+create table t1 (a timestamp(5) default current_timestamp); drop table t1;
+create table t1 (a timestamp(5) default current_timestamp()); drop table t1;
+--error ER_INVALID_DEFAULT
+create table t1 (a timestamp(5) default current_timestamp(2));
+create table t1 (a timestamp(5) default current_timestamp(5)); drop table t1;
+create table t1 (a timestamp(5) default current_timestamp(6)); drop table t1;
+create table t1 (a timestamp(5) on update current_timestamp); drop table t1;
+create table t1 (a timestamp(5) on update current_timestamp()); drop table t1;
+--error ER_INVALID_ON_UPDATE
+create table t1 (a timestamp(5) on update current_timestamp(3));
+create table t1 (a timestamp(5) on update current_timestamp(5)); drop table t1;
+create table t1 (a timestamp(5) on update current_timestamp(6)); drop table t1;
diff --git a/mysql-test/t/user_var.test b/mysql-test/t/user_var.test
index c6c4e4d9d2f..c6930a7e468 100644
--- a/mysql-test/t/user_var.test
+++ b/mysql-test/t/user_var.test
@@ -452,4 +452,18 @@ SELECT DISTINCT POW(COUNT(distinct a), @a:=(SELECT 1 FROM t1 LEFT JOIN t1 AS t2
SELECT @a;
DROP TABLE t1;
+--echo #
+--echo # Check that used memory increases when we set a variable
+--echo #
+
+# Execute twice so the number stabilizes a bit
+let $tmp= `select memory_used from information_schema.processlist`;
+set @var= repeat('a',20000);
+let $tmp2= `select memory_used from information_schema.processlist`;
+--disable_query_log
+--disable_column_names
+eval select $tmp < $tmp2;
+--enable_column_names
+--enable_query_log
+
--echo End of 5.5 tests
diff --git a/mysql-test/t/xa_binlog.test b/mysql-test/t/xa_binlog.test
index 48f1dc6dfaa..430d45ab86a 100644
--- a/mysql-test/t/xa_binlog.test
+++ b/mysql-test/t/xa_binlog.test
@@ -27,6 +27,6 @@ SELECT * FROM t1 ORDER BY a;
--replace_column 2 # 5 #
--replace_regex /xid=[0-9]+/xid=XX/
-SHOW BINLOG EVENTS LIMIT 1,9;
+SHOW BINLOG EVENTS LIMIT 2,9;
DROP TABLE t1;
diff --git a/mysys/array.c b/mysys/array.c
index c969da83586..f72b8951870 100644
--- a/mysys/array.c
+++ b/mysys/array.c
@@ -22,15 +22,16 @@
Initiate dynamic array
SYNOPSIS
- init_dynamic_array2()
+ my_init_dynamic_array2()
array Pointer to an array
element_size Size of element
init_buffer Initial buffer pointer
init_alloc Number of initial elements
alloc_increment Increment for adding new elements
+ my_flags Flags to my_malloc
DESCRIPTION
- init_dynamic_array() initiates array and allocate space for
+    my_init_dynamic_array2() initiates the array and allocates space for
    init_alloc elements.
Array is usable even if space allocation failed, hence, the
function never returns TRUE.
@@ -40,11 +41,11 @@
FALSE Ok
*/
-my_bool init_dynamic_array2(DYNAMIC_ARRAY *array, uint element_size,
+my_bool my_init_dynamic_array2(DYNAMIC_ARRAY *array, uint element_size,
void *init_buffer, uint init_alloc,
- uint alloc_increment)
+ uint alloc_increment, myf my_flags)
{
- DBUG_ENTER("init_dynamic_array2");
+ DBUG_ENTER("my_init_dynamic_array2");
if (!alloc_increment)
{
alloc_increment=max((8192-MALLOC_OVERHEAD)/element_size,16);
@@ -55,6 +56,7 @@ my_bool init_dynamic_array2(DYNAMIC_ARRAY *array, uint element_size,
array->max_element=init_alloc;
array->alloc_increment=alloc_increment;
array->size_of_element=element_size;
+ array->malloc_flags= my_flags;
if ((array->buffer= init_buffer))
DBUG_RETURN(FALSE);
/*
@@ -62,18 +64,12 @@ my_bool init_dynamic_array2(DYNAMIC_ARRAY *array, uint element_size,
should not throw an error
*/
if (init_alloc &&
- !(array->buffer= (uchar*) my_malloc(element_size*init_alloc, MYF(0))))
+ !(array->buffer= (uchar*) my_malloc(element_size*init_alloc,
+ MYF(my_flags))))
array->max_element=0;
DBUG_RETURN(FALSE);
}
-my_bool init_dynamic_array(DYNAMIC_ARRAY *array, uint element_size,
- uint init_alloc, uint alloc_increment)
-{
- /* placeholder to preserve ABI */
- return my_init_dynamic_array_ci(array, element_size, init_alloc,
- alloc_increment);
-}
/*
Insert element at the end of array. Allocate memory if needed.
@@ -137,7 +133,7 @@ uchar *alloc_dynamic(DYNAMIC_ARRAY *array)
if (!(new_ptr= (char *) my_malloc((array->max_element+
array->alloc_increment) *
array->size_of_element,
- MYF(MY_WME))))
+ MYF(array->malloc_flags | MY_WME))))
DBUG_RETURN(0);
memcpy(new_ptr, array->buffer,
array->elements * array->size_of_element);
@@ -146,7 +142,8 @@ uchar *alloc_dynamic(DYNAMIC_ARRAY *array)
my_realloc(array->buffer,(array->max_element+
array->alloc_increment)*
array->size_of_element,
- MYF(MY_WME | MY_ALLOW_ZERO_PTR))))
+ MYF(MY_WME | MY_ALLOW_ZERO_PTR |
+ array->malloc_flags))))
DBUG_RETURN(0);
array->buffer= (uchar*) new_ptr;
array->max_element+=array->alloc_increment;
@@ -242,14 +239,15 @@ my_bool allocate_dynamic(DYNAMIC_ARRAY *array, uint max_elements)
*/
if (!(new_ptr= (uchar *) my_malloc(size *
array->size_of_element,
- MYF(MY_WME))))
+ MYF(array->malloc_flags | MY_WME))))
DBUG_RETURN(0);
memcpy(new_ptr, array->buffer,
array->elements * array->size_of_element);
}
else if (!(new_ptr= (uchar*) my_realloc(array->buffer,size*
array->size_of_element,
- MYF(MY_WME | MY_ALLOW_ZERO_PTR))))
+ MYF(MY_WME | MY_ALLOW_ZERO_PTR |
+ array->malloc_flags))))
DBUG_RETURN(TRUE);
array->buffer= new_ptr;
array->max_element= size;
@@ -347,7 +345,7 @@ void freeze_size(DYNAMIC_ARRAY *array)
{
array->buffer=(uchar*) my_realloc(array->buffer,
elements*array->size_of_element,
- MYF(MY_WME));
+ MYF(MY_WME | array->malloc_flags));
array->max_element=elements;
}
}
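Editor's note: a short usage sketch for the new my_init_dynamic_array2() signature above. Illustrative only; it assumes compilation inside this tree, where my_sys.h declares DYNAMIC_ARRAY, insert_dynamic(), get_dynamic() and delete_dynamic().

    #include <my_global.h>
    #include <my_sys.h>

    static void dynamic_array_demo(void)
    {
      DYNAMIC_ARRAY arr;
      uint i, val= 42;
      /* NULL init_buffer: the array mallocs its own storage lazily;
         MYF(0) passes no special allocation flags */
      (void) my_init_dynamic_array2(&arr, sizeof(uint), NULL, 16, 16, MYF(0));
      (void) insert_dynamic(&arr, (uchar*) &val);  /* copies the element in */
      for (i= 0; i < arr.elements; i++)
      {
        uint elem;
        get_dynamic(&arr, (uchar*) &elem, i);      /* copies the element out */
      }
      delete_dynamic(&arr);                        /* frees the buffer */
    }

The point of the new my_flags argument is visible in alloc_dynamic() and allocate_dynamic() above: a flag such as MY_THREAD_SPECIFIC given at init time is ORed into every later my_malloc()/my_realloc() call for the array.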
diff --git a/mysys/default.c b/mysys/default.c
index c7ac0d89462..a90f428eca5 100644
--- a/mysys/default.c
+++ b/mysys/default.c
@@ -520,7 +520,7 @@ int my_load_defaults(const char *conf_file, const char **groups,
uint args_sep= my_getopt_use_args_separator ? 1 : 0;
DBUG_ENTER("load_defaults");
- init_alloc_root(&alloc,512,0);
+ init_alloc_root(&alloc, 512, 0, MYF(0));
if ((dirs= init_default_directories(&alloc)) == NULL)
goto err;
/*
@@ -566,7 +566,7 @@ int my_load_defaults(const char *conf_file, const char **groups,
for (; *groups ; groups++)
group.count++;
- if (my_init_dynamic_array(&args, sizeof(char*),*argc, 32))
+ if (my_init_dynamic_array(&args, sizeof(char*),*argc, 32, MYF(0)))
goto err;
ctx.alloc= &alloc;
@@ -1043,7 +1043,7 @@ void my_print_default_files(const char *conf_file)
{
const char **dirs;
MEM_ROOT alloc;
- init_alloc_root(&alloc,512,0);
+ init_alloc_root(&alloc, 512, 0, MYF(0));
if ((dirs= init_default_directories(&alloc)) == NULL)
{
diff --git a/mysys/errors.c b/mysys/errors.c
index bf7299ce4c6..6cc0f083922 100644
--- a/mysys/errors.c
+++ b/mysys/errors.c
@@ -1,5 +1,6 @@
/*
Copyright (c) 2000, 2010, Oracle and/or its affiliates
+ Copyright (c) 1995, 2012 Monty Program Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -21,41 +22,41 @@
const char *globerrs[GLOBERRS]=
{
- "Can't create/write to file '%s' (Errcode: %d)",
- "Error reading file '%s' (Errcode: %d)",
- "Error writing file '%s' (Errcode: %d)",
- "Error on close of '%s' (Errcode: %d)",
+ "Can't create/write to file '%s' (Errcode: %M)",
+ "Error reading file '%s' (Errcode: %M)",
+ "Error writing file '%s' (Errcode: %M)",
+ "Error on close of '%s' (Errcode: %M)",
"Out of memory (Needed %u bytes)",
- "Error on delete of '%s' (Errcode: %d)",
- "Error on rename of '%s' to '%s' (Errcode: %d)",
+ "Error on delete of '%s' (Errcode: %M)",
+ "Error on rename of '%s' to '%s' (Errcode: %M)",
"",
- "Unexpected eof found when reading file '%s' (Errcode: %d)",
- "Can't lock file (Errcode: %d)",
- "Can't unlock file (Errcode: %d)",
- "Can't read dir of '%s' (Errcode: %d)",
- "Can't get stat of '%s' (Errcode: %d)",
- "Can't change size of file (Errcode: %d)",
- "Can't open stream from handle (Errcode: %d)",
- "Can't get working dirctory (Errcode: %d)",
- "Can't change dir to '%s' (Errcode: %d)",
+ "Unexpected end-of-file found when reading file '%s' (Errcode: %M)",
+ "Can't lock file (Errcode: %M)",
+ "Can't unlock file (Errcode: %M)",
+ "Can't read dir of '%s' (Errcode: %M)",
+ "Can't get stat of '%s' (Errcode: %M)",
+ "Can't change size of file (Errcode: %M)",
+ "Can't open stream from handle (Errcode: %M)",
+ "Can't get working directory (Errcode: %M)",
+ "Can't change dir to '%s' (Errcode: %M)",
"Warning: '%s' had %d links",
"Warning: %d files and %d streams is left open\n",
- "Disk is full writing '%s' (Errcode: %d). Waiting for someone to free space... (Expect up to %d secs delay for server to continue after freeing disk space)",
- "Can't create directory '%s' (Errcode: %d)",
+ "Disk is full writing '%s' (Errcode: %M). Waiting for someone to free space... (Expect up to %d secs delay for server to continue after freeing disk space)",
+ "Can't create directory '%s' (Errcode: %M)",
"Character set '%s' is not a compiled character set and is not specified in the '%s' file",
- "Out of resources when opening file '%s' (Errcode: %d)",
- "Can't read value for symlink '%s' (Error %d)",
- "Can't create symlink '%s' pointing at '%s' (Error %d)",
- "Error on realpath() on '%s' (Error %d)",
- "Can't sync file '%s' to disk (Errcode: %d)",
+ "Out of resources when opening file '%s' (Errcode: %M)",
+ "Can't read value for symlink '%s' (Errcode: %M)",
+ "Can't create symlink '%s' pointing at '%s' (Errcode: %M)",
+ "Error on realpath() on '%s' (Errcode: %M)",
+ "Can't sync file '%s' to disk (Errcode: %M)",
"Collation '%s' is not a compiled collation and is not specified in the '%s' file",
- "File '%s' not found (Errcode: %d)",
+ "File '%s' not found (Errcode: %M)",
"File '%s' (fileno: %d) was not closed",
- "Can't change ownership of the file '%s' (Errcode: %d)",
- "Can't change permissions of the file '%s' (Errcode: %d)",
- "Can't seek in file '%s' (Errcode: %d)"
- "Can't change mode for file '%s' to 0x%lx (Error: %d)",
- "Warning: Can't copy ownership for file '%s' (Error: %d)"
+ "Can't change ownership of the file '%s' (Errcode: %M)",
+ "Can't change permissions of the file '%s' (Errcode: %M)",
+ "Can't seek in file '%s' (Errcode: %M)"
+ "Can't change mode for file '%s' to 0x%lx (Errcode: %M)",
+ "Warning: Can't copy ownership for file '%s' (Errcode: %M)"
};
void init_glob_errs(void)
@@ -67,40 +68,40 @@ void init_glob_errs(void)
void init_glob_errs()
{
- EE(EE_CANTCREATEFILE) = "Can't create/write to file '%s' (Errcode: %d)";
- EE(EE_READ) = "Error reading file '%s' (Errcode: %d)";
- EE(EE_WRITE) = "Error writing file '%s' (Errcode: %d)";
- EE(EE_BADCLOSE) = "Error on close of '%'s (Errcode: %d)";
+ EE(EE_CANTCREATEFILE) = "Can't create/write to file '%s' (Errcode: %M)";
+ EE(EE_READ) = "Error reading file '%s' (Errcode: %M)";
+ EE(EE_WRITE) = "Error writing file '%s' (Errcode: %M)";
+  EE(EE_BADCLOSE)	= "Error on close of '%s' (Errcode: %M)";
EE(EE_OUTOFMEMORY) = "Out of memory (Needed %u bytes)";
- EE(EE_DELETE) = "Error on delete of '%s' (Errcode: %d)";
- EE(EE_LINK) = "Error on rename of '%s' to '%s' (Errcode: %d)";
- EE(EE_EOFERR) = "Unexpected eof found when reading file '%s' (Errcode: %d)";
- EE(EE_CANTLOCK) = "Can't lock file (Errcode: %d)";
- EE(EE_CANTUNLOCK) = "Can't unlock file (Errcode: %d)";
- EE(EE_DIR) = "Can't read dir of '%s' (Errcode: %d)";
- EE(EE_STAT) = "Can't get stat of '%s' (Errcode: %d)";
- EE(EE_CANT_CHSIZE) = "Can't change size of file (Errcode: %d)";
- EE(EE_CANT_OPEN_STREAM)= "Can't open stream from handle (Errcode: %d)";
- EE(EE_GETWD) = "Can't get working directory (Errcode: %d)";
- EE(EE_SETWD) = "Can't change dir to '%s' (Errcode: %d)";
+ EE(EE_DELETE) = "Error on delete of '%s' (Errcode: %M)";
+ EE(EE_LINK) = "Error on rename of '%s' to '%s' (Errcode: %M)";
+  EE(EE_EOFERR)		= "Unexpected end-of-file found when reading file '%s' (Errcode: %M)";
+ EE(EE_CANTLOCK) = "Can't lock file (Errcode: %M)";
+ EE(EE_CANTUNLOCK) = "Can't unlock file (Errcode: %M)";
+ EE(EE_DIR) = "Can't read dir of '%s' (Errcode: %M)";
+ EE(EE_STAT) = "Can't get stat of '%s' (Errcode: %M)";
+ EE(EE_CANT_CHSIZE) = "Can't change size of file (Errcode: %M)";
+ EE(EE_CANT_OPEN_STREAM)= "Can't open stream from handle (Errcode: %M)";
+ EE(EE_GETWD) = "Can't get working directory (Errcode: %M)";
+ EE(EE_SETWD) = "Can't change dir to '%s' (Errcode: %M)";
EE(EE_LINK_WARNING) = "Warning: '%s' had %d links";
  EE(EE_OPEN_WARNING)	= "Warning: %d files and %d streams are left open\n";
- EE(EE_DISK_FULL) = "Disk is full writing '%s'. Waiting for someone to free space...";
- EE(EE_CANT_MKDIR) ="Can't create directory '%s' (Errcode: %d)";
+  EE(EE_DISK_FULL)	= "Disk is full writing '%s' (Errcode: %M). Waiting for someone to free space... (Expect up to %d secs delay for server to continue after freeing disk space)";
+  EE(EE_CANT_MKDIR)	= "Can't create directory '%s' (Errcode: %M)";
EE(EE_UNKNOWN_CHARSET)= "Character set '%s' is not a compiled character set and is not specified in the %s file";
- EE(EE_OUT_OF_FILERESOURCES)="Out of resources when opening file '%s' (Errcode: %d)";
- EE(EE_CANT_READLINK)= "Can't read value for symlink '%s' (Error %d)";
- EE(EE_CANT_SYMLINK)= "Can't create symlink '%s' pointing at '%s' (Error %d)";
- EE(EE_REALPATH)= "Error on realpath() on '%s' (Error %d)";
- EE(EE_SYNC)= "Can't sync file '%s' to disk (Errcode: %d)";
+ EE(EE_OUT_OF_FILERESOURCES)="Out of resources when opening file '%s' (Errcode: %M)";
+ EE(EE_CANT_READLINK)= "Can't read value for symlink '%s' (Errcode: %M)";
+ EE(EE_CANT_SYMLINK)= "Can't create symlink '%s' pointing at '%s' (Errcode: %M)";
+ EE(EE_REALPATH)= "Error on realpath() on '%s' (Errcode: %M)";
+ EE(EE_SYNC)= "Can't sync file '%s' to disk (Errcode: %M)";
EE(EE_UNKNOWN_COLLATION)= "Collation '%s' is not a compiled collation and is not specified in the %s file";
- EE(EE_FILENOTFOUND) = "File '%s' not found (Errcode: %d)";
+ EE(EE_FILENOTFOUND) = "File '%s' not found (Errcode: %M)";
EE(EE_FILE_NOT_CLOSED) = "File '%s' (fileno: %d) was not closed";
- EE(EE_CHANGE_OWNERSHIP) = "Can't change ownership of the file '%s' (Errcode: %d)";
- EE(EE_CHANGE_PERMISSIONS) = "Can't change permissions of the file '%s' (Errcode: %d)";
- EE(EE_CANT_SEEK) = "Can't seek in file '%s' (Errcode: %d)";
- EE(EE_CANT_CHMOD) = "Can't change mode for file '%s' to 0x%lx (Error: %d)";
- EE(EE_CANT_COPY_OWNERSHIP)= "Warning: Can't copy ownership for file '%s' (Error: %d)";
+ EE(EE_CHANGE_OWNERSHIP) = "Can't change ownership of the file '%s' (Errcode: %M)";
+ EE(EE_CHANGE_PERMISSIONS) = "Can't change permissions of the file '%s' (Errcode: %M)";
+ EE(EE_CANT_SEEK) = "Can't seek in file '%s' (Errcode: %M)";
+ EE(EE_CANT_CHMOD) = "Can't change mode for file '%s' to 0x%lx (Errcode: %M)";
+ EE(EE_CANT_COPY_OWNERSHIP)= "Warning: Can't copy ownership for file '%s' (Errcode: %M)";
}
#endif
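Editor's note: the strings above switch from %d to %M. A sketch of how such a message might be rendered, assuming %M is the my_vsnprintf() extension that consumes an int error code and prints it together with its description (the exact output shown in the comment is an assumption):

    #include <my_global.h>
    #include <my_sys.h>
    #include <m_string.h>

    static void errmsg_demo(void)
    {
      char buf[256];
      /* 2 is ENOENT on most systems */
      my_snprintf(buf, sizeof(buf),
                  "Error reading file '%s' (Errcode: %M)", "my.cnf", 2);
      /* buf now ends in something like:
         (Errcode: 2 "No such file or directory") */
    }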
diff --git a/mysys/hash.c b/mysys/hash.c
index b93f6b666d6..0a830d8e3f4 100644
--- a/mysys/hash.c
+++ b/mysys/hash.c
@@ -89,8 +89,10 @@ _my_hash_init(HASH *hash, uint growth_size, CHARSET_INFO *charset,
hash->free=free_element;
hash->flags=flags;
hash->charset=charset;
- res= my_init_dynamic_array_ci(&hash->array,
- sizeof(HASH_LINK), size, growth_size);
+ res= my_init_dynamic_array2(&hash->array,
+ sizeof(HASH_LINK), NULL, size, growth_size,
+ MYF((flags & HASH_THREAD_SPECIFIC ?
+ MY_THREAD_SPECIFIC : 0)));
DBUG_RETURN(res);
}
diff --git a/mysys/ma_dyncol.c b/mysys/ma_dyncol.c
index c717f69c3e5..8a224f1c5e8 100644
--- a/mysys/ma_dyncol.c
+++ b/mysys/ma_dyncol.c
@@ -1,5 +1,5 @@
-/* Copyright (c) 2011, Monty Program Ab
- Copyright (c) 2011, Oleksandr Byelkin
+/* Copyright (c) 2011,2012 Monty Program Ab;
+ Copyright (c) 2011,2012 Oleksandr Byelkin
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -26,33 +26,103 @@
SUCH DAMAGE.
*/
+/*
+ Numeric format:
+ ===============
+ * Fixed header part
+ 1 byte flags:
+ 0,1 bits - <offset size> - 1
+ 2-7 bits - 0
+ 2 bytes column counter
+  * Columns directory sorted by column number, each entry consists of:
+    2 bytes column number
+    <offset size> bytes (1-4) - offset from the beginning of the data
+      segment, combined with the 3-bit type
+  * Data of the above columns; size and length of each value depend on its type
+
+ Columns with names:
+ ===================
+ * Fixed header part
+ 1 byte flags:
+ 0,1 bits - <offset size> - 2
+ 2 bit - 1 (means format with names)
+ 3,4 bits - 00 (means <names offset size> - 2,
+ now 2 is the only supported size)
+ 5-7 bits - 0
+ 2 bytes column counter
+  * Variable header part (currently it is in fact of fixed size)
+    <names offset size> (2) bytes - size of the stored names pool
+  * Column directory sorted by name, each entry consists of:
+    <names offset size> (2) bytes - offset of the name
+    <offset size> bytes (2-5) - offset from the beginning of the data
+      segment, combined with the 4-bit type
+  * Names stored one after another
+  * Data of the above columns; size and length of each value depend on its type
+*/
+
#include "mysys_priv.h"
#include <m_string.h>
#include <ma_dyncol.h>
+#include <my_time.h>
+uint32 copy_and_convert(char *to, uint32 to_length, CHARSET_INFO *to_cs,
+ const char *from, uint32 from_length,
+ CHARSET_INFO *from_cs, uint *errors);
/*
Flag byte bits
  2 bits which determine the size of offsets in the header, stored as size - 1
*/
/* mask to get above bits */
-#define DYNCOL_FLG_OFFSET 3
-/* All known flags mask */
-#define DYNCOL_FLG_KNOWN 3
+#define DYNCOL_FLG_OFFSET (1|2)
+#define DYNCOL_FLG_NAMES 4
+#define DYNCOL_FLG_NMOFFSET (8|16)
+/**
+ All known flags mask that could be set.
+
+ @note DYNCOL_FLG_NMOFFSET should be 0 for now.
+*/
+#define DYNCOL_FLG_KNOWN (1|2|4)
+
+/* formats */
+enum enum_dyncol_format
+{
+ dyncol_fmt_num= 0,
+ dyncol_fmt_str= 1
+};
/* dynamic column size reserve */
#define DYNCOL_SYZERESERVE 80
+#define DYNCOL_OFFSET_ERROR 0xffffffff
+
/* length of fixed string header 1 byte - flags, 2 bytes - columns counter */
#define FIXED_HEADER_SIZE 3
+/*
+ length of fixed string header with names
+ 1 byte - flags, 2 bytes - columns counter, 2 bytes - name pool size
+*/
+#define FIXED_HEADER_SIZE_NM 5
#define COLUMN_NUMBER_SIZE 2
+/* 2 bytes offset from the name pool */
+#define COLUMN_NAMEPTR_SIZE 2
+
+#define MAX_OFFSET_LENGTH 4
+#define MAX_OFFSET_LENGTH_NM 5
+
+#define DYNCOL_NUM_CHAR 6
-#define MAX_OFFSET_LENGTH 5
+my_bool mariadb_dyncol_has_names(DYNAMIC_COLUMN *str)
+{
+ if (str->length < 1)
+ return FALSE;
+ return test(str->str[0] & DYNCOL_FLG_NAMES);
+}
static enum enum_dyncol_func_result
dynamic_column_time_store(DYNAMIC_COLUMN *str,
- MYSQL_TIME *value);
+ MYSQL_TIME *value, enum enum_dyncol_format format);
static enum enum_dyncol_func_result
dynamic_column_date_store(DYNAMIC_COLUMN *str,
MYSQL_TIME *value);
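Editor's note: to make the format comment at the top of this hunk concrete, here is a standalone sketch that decodes the 3-byte numeric-format fixed header (flags byte, then a 2-byte little-endian column counter, matching int2store order). The sample bytes are made up; for the names format, bits 0,1 store <offset size> - 2 instead.

    #include <stdio.h>

    int main(void)
    {
      unsigned char hdr[3]= { 0x01, 0x03, 0x00 };  /* hypothetical record */
      unsigned offset_size= (hdr[0] & 0x03) + 1;   /* bits 0,1: <offset size> - 1 */
      unsigned named= (hdr[0] & 0x04) != 0;        /* bit 2: names-format flag */
      unsigned columns= (unsigned) hdr[1] | ((unsigned) hdr[2] << 8);
      printf("offset_size=%u named=%u columns=%u\n", offset_size, named, columns);
      return 0;               /* prints: offset_size=2 named=0 columns=3 */
    }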
@@ -62,6 +132,546 @@ dynamic_column_time_read_internal(DYNAMIC_COLUMN_VALUE *store_it_here,
static enum enum_dyncol_func_result
dynamic_column_date_read_internal(DYNAMIC_COLUMN_VALUE *store_it_here,
uchar *data, size_t length);
+static enum enum_dyncol_func_result
+dynamic_column_get_internal(DYNAMIC_COLUMN *str,
+ DYNAMIC_COLUMN_VALUE *store_it_here,
+ uint num_key, LEX_STRING *str_key);
+static enum enum_dyncol_func_result
+dynamic_column_exists_internal(DYNAMIC_COLUMN *str, uint num_key,
+ LEX_STRING *str_key);
+static enum enum_dyncol_func_result
+dynamic_column_update_many_fmt(DYNAMIC_COLUMN *str,
+ uint add_column_count,
+ void *column_keys,
+ DYNAMIC_COLUMN_VALUE *values,
+ my_bool string_keys);
+static int plan_sort_num(const void *a, const void *b);
+static int plan_sort_named(const void *a, const void *b);
+
+/*
+ Structure to hold information about dynamic columns record and
+ iterate through it.
+*/
+
+struct st_dyn_header
+{
+ uchar *header, *nmpool, *dtpool, *data_end;
+ size_t offset_size;
+ size_t entry_size;
+ size_t header_size;
+ size_t nmpool_size;
+ size_t data_size;
+ /* dyncol_fmt_num - numeric columns, dyncol_fmt_str - column names */
+ enum enum_dyncol_format format;
+ uint column_count;
+
+ uchar *entry, *data, *name;
+ size_t offset;
+ size_t length;
+ enum enum_dynamic_column_type type;
+};
+
+typedef struct st_dyn_header DYN_HEADER;
+
+static inline my_bool read_fixed_header(DYN_HEADER *hdr,
+ DYNAMIC_COLUMN *str);
+static void set_fixed_header(DYNAMIC_COLUMN *str,
+ uint offset_size,
+ uint column_count);
+
+/*
+  Calculate entry size (E) and header size (H) from offset size (O),
+  column count (C) and the fixed part of the entry size (F).
+*/
+
+#define calc_param(E,H,F,O,C) do { \
+ (*(E))= (O) + F; \
+ (*(H))= (*(E)) * (C); \
+}while(0);
+
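Editor's note: a worked instance of calc_param() (values chosen arbitrarily). For the numeric format the fixed part of an entry is the 2-byte column number, so with 2-byte data offsets and 10 columns:

    size_t entry_size, header_size;
    calc_param(&entry_size, &header_size, COLUMN_NUMBER_SIZE, 2, 10);
    /* entry_size == 2 + 2 == 4, header_size == 4 * 10 == 40 */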
+
+/**
+  Name pool size function; for the numeric format it is always 0
+*/
+
+static size_t name_size_num(void *keys __attribute__((unused)),
+ uint i __attribute__((unused)))
+{
+ return 0;
+}
+
+
+/**
+  Name pool size function (names format).
+*/
+static size_t name_size_named(void *keys, uint i)
+{
+ return ((LEX_STRING *) keys)[i].length;
+}
+
+
+/**
+ Comparator function for references on column numbers for qsort
+ (numeric format)
+*/
+
+static int column_sort_num(const void *a, const void *b)
+{
+ return **((uint **)a) - **((uint **)b);
+}
+
+/**
+  Comparator function for column names
+  (names format)
+*/
+
+int mariadb_dyncol_column_cmp_named(const LEX_STRING *s1, const LEX_STRING *s2)
+{
+ /*
+    We compare instead of subtracting to avoid data loss when the
+    length difference is huge (more than fits in an int).
+ */
+ int rc= (s1->length > s2->length ? 1 :
+ (s1->length < s2->length ? -1 : 0));
+ if (rc == 0)
+ rc= memcmp((void *)s1->str, (void *)s2->str,
+ (size_t) s1->length);
+ return rc;
+}
+
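Editor's note: a standalone illustration of the data loss the comment above guards against; it assumes a 64-bit size_t, and the lengths are contrived:

    #include <stdio.h>

    int main(void)
    {
      size_t a= ((size_t) 1) << 32, b= 0;  /* lengths differing by 2^32 */
      int bad= (int) (a - b);              /* truncates to 0: "equal" */
      int good= a > b ? 1 : (a < b ? -1 : 0);
      printf("bad=%d good=%d\n", bad, good);  /* prints: bad=0 good=1 */
      return 0;
    }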
+
+/**
+  Comparator function for references on column names for qsort
+ (names format)
+*/
+
+static int column_sort_named(const void *a, const void *b)
+{
+ return mariadb_dyncol_column_cmp_named(*((LEX_STRING **)a),
+ *((LEX_STRING **)b));
+}
+
+
+/**
+ Check limit function (numeric format)
+*/
+
+static my_bool check_limit_num(const void *val)
+{
+ return **((uint **)val) > UINT_MAX16;
+}
+
+
+/**
+ Check limit function (names format)
+*/
+
+static my_bool check_limit_named(const void *val)
+{
+ return (*((LEX_STRING **)val))->length > MAX_NAME_LENGTH;
+}
+
+
+/**
+ Write numeric format static header part.
+*/
+
+static void set_fixed_header_num(DYNAMIC_COLUMN *str, DYN_HEADER *hdr)
+{
+ set_fixed_header(str, (uint)hdr->offset_size, hdr->column_count);
+ hdr->header= (uchar *)str->str + FIXED_HEADER_SIZE;
+ hdr->nmpool= hdr->dtpool= hdr->header + hdr->header_size;
+}
+
+
+/**
+ Write names format static header part.
+*/
+
+static void set_fixed_header_named(DYNAMIC_COLUMN *str, DYN_HEADER *hdr)
+{
+ DBUG_ASSERT(hdr->column_count <= 0xffff);
+ DBUG_ASSERT(hdr->offset_size <= MAX_OFFSET_LENGTH_NM);
+ /* size of data offset, named format flag, size of names offset (0 means 2) */
+ str->str[0]=
+ (char) ((str->str[0] & ~(DYNCOL_FLG_OFFSET | DYNCOL_FLG_NMOFFSET)) |
+ (hdr->offset_size - 2) | DYNCOL_FLG_NAMES);
+ int2store(str->str + 1, hdr->column_count); /* columns number */
+ int2store(str->str + 3, hdr->nmpool_size);
+ hdr->header= (uchar *)str->str + FIXED_HEADER_SIZE_NM;
+ hdr->nmpool= hdr->header + hdr->header_size;
+ hdr->dtpool= hdr->nmpool + hdr->nmpool_size;
+}
+
+
+/**
+ Store offset and type information in the given place
+
+ @param place Beginning of the index entry
+ @param offset_size Size of offset field in bytes
+ @param type Type to be written
+ @param offset Offset to be written
+*/
+
+static my_bool type_and_offset_store_num(uchar *place, size_t offset_size,
+ DYNAMIC_COLUMN_TYPE type,
+ size_t offset)
+{
+ ulong val = (((ulong) offset) << 3) | (type - 1);
+ DBUG_ASSERT(type != DYN_COL_NULL);
+ DBUG_ASSERT(((type - 1) & (~7)) == 0); /* fit in 3 bits */
+ DBUG_ASSERT(offset_size >= 1 && offset_size <= 4);
+
+ /* Index entry starts with column number; jump over it */
+ place+= COLUMN_NUMBER_SIZE;
+
+ switch (offset_size) {
+ case 1:
+ if (offset >= 0x1f) /* all 1 value is reserved */
+ return TRUE;
+ place[0]= (uchar)val;
+ break;
+ case 2:
+ if (offset >= 0x1fff) /* all 1 value is reserved */
+ return TRUE;
+ int2store(place, val);
+ break;
+ case 3:
+ if (offset >= 0x1fffff) /* all 1 value is reserved */
+ return TRUE;
+ int3store(place, val);
+ break;
+ case 4:
+ if (offset >= 0x1fffffff) /* all 1 value is reserved */
+ return TRUE;
+ int4store(place, val);
+ break;
+ default:
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
+static my_bool type_and_offset_store_named(uchar *place, size_t offset_size,
+ DYNAMIC_COLUMN_TYPE type,
+ size_t offset)
+{
+  ulonglong val = (((ulonglong) offset) << 4) | (type - 1);
+ DBUG_ASSERT(type != DYN_COL_NULL);
+ DBUG_ASSERT(((type - 1) & (~0xf)) == 0); /* fit in 4 bits */
+ DBUG_ASSERT(offset_size >= 2 && offset_size <= 5);
+
+ /* Index entry starts with name offset; jump over it */
+ place+= COLUMN_NAMEPTR_SIZE;
+ switch (offset_size) {
+ case 2:
+ if (offset >= 0xfff) /* all 1 value is reserved */
+ return TRUE;
+ int2store(place, val);
+ break;
+ case 3:
+ if (offset >= 0xfffff) /* all 1 value is reserved */
+ return TRUE;
+ int3store(place, val);
+ break;
+ case 4:
+ if (offset >= 0xfffffff) /* all 1 value is reserved */
+ return TRUE;
+ int4store(place, val);
+ break;
+ case 5:
+#if SIZEOF_SIZE_T > 4
+ if (offset >= 0xfffffffffull) /* all 1 value is reserved */
+ return TRUE;
+#endif
+ int5store(place, val);
+ break;
+ case 1:
+ default:
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/**
+ Write numeric format header entry
+ 2 bytes - column number
+ 1-4 bytes - data offset combined with type
+
+ @param hdr descriptor of dynamic column record
+ @param column_key pointer to uint (column number)
+ @param value value which will be written (only type used)
+ @param offset offset of the data
+*/
+
+static my_bool put_header_entry_num(DYN_HEADER *hdr,
+ void *column_key,
+ DYNAMIC_COLUMN_VALUE *value,
+ size_t offset)
+{
+ uint *column_number= (uint *)column_key;
+ int2store(hdr->entry, *column_number);
+ DBUG_ASSERT(hdr->nmpool_size == 0);
+ if (type_and_offset_store_num(hdr->entry, hdr->offset_size,
+ value->type,
+ offset))
+ return TRUE;
+ hdr->entry= hdr->entry + hdr->entry_size;
+ return FALSE;
+}
+
+
+/**
+ Write names format header entry
+  2 bytes - name offset in the name pool (the name length is implicit:
+  it is recovered from the next entry's name offset)
+  2-5 bytes - data offset combined with type
+
+ @param hdr descriptor of dynamic column record
+ @param column_key pointer to LEX_STRING (column name)
+ @param value value which will be written (only type used)
+ @param offset offset of the data
+*/
+
+static my_bool put_header_entry_named(DYN_HEADER *hdr,
+ void *column_key,
+ DYNAMIC_COLUMN_VALUE *value,
+ size_t offset)
+{
+ LEX_STRING *column_name= (LEX_STRING *)column_key;
+ DBUG_ASSERT(column_name->length <= MAX_NAME_LENGTH);
+ DBUG_ASSERT(hdr->name - hdr->nmpool < (long) 0x10000L);
+ int2store(hdr->entry, hdr->name - hdr->nmpool);
+ memcpy(hdr->name, column_name->str, column_name->length);
+ DBUG_ASSERT(hdr->nmpool_size != 0 || column_name->length == 0);
+ if (type_and_offset_store_named(hdr->entry, hdr->offset_size,
+ value->type,
+ offset))
+ return TRUE;
+ hdr->entry+= hdr->entry_size;
+ hdr->name+= column_name->length;
+ return FALSE;
+}
+
+
+/**
+ Calculate length of offset field for given data length
+
+ @param data_length Length of the data segment
+
+ @return number of bytes
+*/
+
+static size_t dynamic_column_offset_bytes_num(size_t data_length)
+{
+ if (data_length < 0x1f) /* all 1 value is reserved */
+ return 1;
+ if (data_length < 0x1fff) /* all 1 value is reserved */
+ return 2;
+ if (data_length < 0x1fffff) /* all 1 value is reserved */
+ return 3;
+ if (data_length < 0x1fffffff) /* all 1 value is reserved */
+ return 4;
+  return MAX_OFFSET_LENGTH + 1; /* to signal an error */
+}
+
+static size_t dynamic_column_offset_bytes_named(size_t data_length)
+{
+ if (data_length < 0xfff) /* all 1 value is reserved */
+ return 2;
+ if (data_length < 0xfffff) /* all 1 value is reserved */
+ return 3;
+ if (data_length < 0xfffffff) /* all 1 value is reserved */
+ return 4;
+#if SIZEOF_SIZE_T > 4
+ if (data_length < 0xfffffffffull) /* all 1 value is reserved */
+#endif
+ return 5;
+  return MAX_OFFSET_LENGTH_NM + 1; /* to signal an error */
+}
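+
+/*
+  Editor's illustration (a sketch, not part of the original patch): the
+  all-ones offset value is reserved in both formats, so the thresholds
+  above switch one byte earlier than a pure bit count would suggest.
+*/
+static inline void offset_bytes_example(void)
+{
+  DBUG_ASSERT(dynamic_column_offset_bytes_num(0x1e) == 1);
+  DBUG_ASSERT(dynamic_column_offset_bytes_num(0x1f) == 2);
+  DBUG_ASSERT(dynamic_column_offset_bytes_named(0xffe) == 2);
+  DBUG_ASSERT(dynamic_column_offset_bytes_named(0xfff) == 3);
+}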
+
+/**
+ Read offset and type information from index entry
+
+ @param type Where to put type info
+ @param offset Where to put offset info
+ @param place beginning of the type and offset
+ @param offset_size Size of offset field in bytes
+*/
+
+static my_bool type_and_offset_read_num(DYNAMIC_COLUMN_TYPE *type,
+ size_t *offset,
+ uchar *place, size_t offset_size)
+{
+ ulong UNINIT_VAR(val);
+ ulong UNINIT_VAR(lim);
+
+ DBUG_ASSERT(offset_size >= 1 && offset_size <= 4);
+
+ switch (offset_size) {
+ case 1:
+ val= (ulong)place[0];
+ lim= 0x1f;
+ break;
+ case 2:
+ val= uint2korr(place);
+ lim= 0x1fff;
+ break;
+ case 3:
+ val= uint3korr(place);
+ lim= 0x1fffff;
+ break;
+ case 4:
+ val= uint4korr(place);
+ lim= 0x1fffffff;
+ break;
+ default:
+ DBUG_ASSERT(0); /* impossible */
+ return 1;
+ }
+ *type= (val & 0x7) + 1;
+ *offset= val >> 3;
+ return (*offset >= lim);
+}
+
+static my_bool type_and_offset_read_named(DYNAMIC_COLUMN_TYPE *type,
+ size_t *offset,
+ uchar *place, size_t offset_size)
+{
+ ulonglong UNINIT_VAR(val);
+ ulonglong UNINIT_VAR(lim);
+ DBUG_ASSERT(offset_size >= 2 && offset_size <= 5);
+
+ switch (offset_size) {
+ case 2:
+ val= uint2korr(place);
+ lim= 0xfff;
+ break;
+ case 3:
+ val= uint3korr(place);
+ lim= 0xfffff;
+ break;
+ case 4:
+ val= uint4korr(place);
+ lim= 0xfffffff;
+ break;
+ case 5:
+ val= uint5korr(place);
+ lim= 0xfffffffffull;
+ break;
+ case 1:
+ default:
+ DBUG_ASSERT(0); /* impossible */
+ return 1;
+ }
+ *type= (val & 0xf) + 1;
+ *offset= val >> 4;
+ return (*offset >= lim);
+}
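+
+/*
+  Editor's illustration (a sketch, not part of the original patch): a round
+  trip through the named-format entry packing.  The store side writes
+  (offset << 4) | (type - 1) after the 2-byte name pointer, so the read
+  side is handed a pointer already advanced past COLUMN_NAMEPTR_SIZE.
+*/
+static inline void type_and_offset_roundtrip_example(void)
+{
+  uchar entry[COLUMN_NAMEPTR_SIZE + 4];
+  DYNAMIC_COLUMN_TYPE type;
+  size_t offset;
+  if (!type_and_offset_store_named(entry, 4, DYN_COL_STRING, 1000) &&
+      !type_and_offset_read_named(&type, &offset,
+                                  entry + COLUMN_NAMEPTR_SIZE, 4))
+    DBUG_ASSERT(type == DYN_COL_STRING && offset == 1000);
+}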
+
+/**
+  Format descriptor: constants and function references for
+  format-specific processing
+*/
+
+struct st_service_funcs
+{
+ /* size of fixed header */
+ uint fixed_hdr;
+ /* size of fixed part of header entry */
+ uint fixed_hdr_entry;
+
+  /* size of an array element which stores keys */
+ uint key_size_in_array;
+
+ /* Maximum data offset size in bytes */
+ size_t max_offset_size;
+
+ size_t (*name_size)
+ (void *, uint);
+ int (*column_sort)
+ (const void *a, const void *b);
+ my_bool (*check_limit)
+ (const void *val);
+ void (*set_fixed_hdr)
+ (DYNAMIC_COLUMN *str, DYN_HEADER *hdr);
+ my_bool (*put_header_entry)(DYN_HEADER *hdr,
+ void *column_key,
+ DYNAMIC_COLUMN_VALUE *value,
+ size_t offset);
+ int (*plan_sort)(const void *a, const void *b);
+ size_t (*dynamic_column_offset_bytes)(size_t data_length);
+ my_bool (*type_and_offset_read)(DYNAMIC_COLUMN_TYPE *type,
+ size_t *offset,
+ uchar *place, size_t offset_size);
+
+};
+
+
+/**
+  Descriptors of the two actual formats
+*/
+
+static struct st_service_funcs fmt_data[2]=
+{
+ {
+ FIXED_HEADER_SIZE,
+ COLUMN_NUMBER_SIZE,
+ sizeof(uint),
+ MAX_OFFSET_LENGTH,
+ &name_size_num,
+ &column_sort_num,
+ &check_limit_num,
+ &set_fixed_header_num,
+ &put_header_entry_num,
+ &plan_sort_num,
+ &dynamic_column_offset_bytes_num,
+ &type_and_offset_read_num
+ },
+ {
+ FIXED_HEADER_SIZE_NM,
+ COLUMN_NAMEPTR_SIZE,
+ sizeof(LEX_STRING),
+ MAX_OFFSET_LENGTH_NM,
+ &name_size_named,
+ &column_sort_named,
+ &check_limit_named,
+ &set_fixed_header_named,
+ &put_header_entry_named,
+ &plan_sort_named,
+ &dynamic_column_offset_bytes_named,
+ &type_and_offset_read_named
+ }
+};
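+
+/*
+  Editor's illustration (a sketch, not part of the original patch): all
+  format-dependent steps dispatch through this table, so generic code only
+  needs the format index, e.g. to size a header entry:
+*/
+static inline size_t entry_size_example(enum enum_dyncol_format format,
+                                        size_t offset_size)
+{
+  /* fixed part of a header entry plus the variable data-offset field */
+  return fmt_data[format].fixed_hdr_entry + offset_size;
+}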
+
+
+/**
+ Read dynamic column record header and fill the descriptor
+
+ @param hdr dynamic columns record descriptor to fill
+ @param str dynamic columns record
+
+ @return ER_DYNCOL_* return code
+*/
+
+static enum enum_dyncol_func_result
+init_read_hdr(DYN_HEADER *hdr, DYNAMIC_COLUMN *str)
+{
+ if (read_fixed_header(hdr, str))
+ return ER_DYNCOL_FORMAT;
+ hdr->header= (uchar*)str->str + fmt_data[hdr->format].fixed_hdr;
+ calc_param(&hdr->entry_size, &hdr->header_size,
+ fmt_data[hdr->format].fixed_hdr_entry, hdr->offset_size,
+ hdr->column_count);
+ hdr->nmpool= hdr->header + hdr->header_size;
+ hdr->dtpool= hdr->nmpool + hdr->nmpool_size;
+ hdr->data_size= str->length - fmt_data[hdr->format].fixed_hdr -
+ hdr->header_size - hdr->nmpool_size;
+ hdr->data_end= (uchar*)str->str + str->length;
+ return ER_DYNCOL_OK;
+}
+
/**
  Initialize dynamic column string (make it empty but with correct format)
@@ -73,7 +683,7 @@ dynamic_column_date_read_internal(DYNAMIC_COLUMN_VALUE *store_it_here,
@retval TRUE error
*/
-static my_bool dynamic_column_init_str(DYNAMIC_COLUMN *str, size_t size)
+static my_bool dynamic_column_init_named(DYNAMIC_COLUMN *str, size_t size)
{
DBUG_ASSERT(size != 0);
@@ -82,11 +692,8 @@ static my_bool dynamic_column_init_str(DYNAMIC_COLUMN *str, size_t size)
- First \0 is flags
- other 2 \0 is number of fields
*/
- if (init_dynamic_string(str, NULL,
- size + FIXED_HEADER_SIZE, DYNCOL_SYZERESERVE))
+ if (init_dynamic_string(str, NULL, size, DYNCOL_SYZERESERVE))
return TRUE;
- bzero(str->str, FIXED_HEADER_SIZE);
- str->length= FIXED_HEADER_SIZE;
return FALSE;
}
@@ -318,7 +925,8 @@ dynamic_column_sint_read(DYNAMIC_COLUMN_VALUE *store_it_here,
*/
static size_t
-dynamic_column_value_len(DYNAMIC_COLUMN_VALUE *value)
+dynamic_column_value_len(DYNAMIC_COLUMN_VALUE *value,
+ enum enum_dyncol_format format)
{
switch (value->type) {
case DYN_COL_NULL:
@@ -360,14 +968,22 @@ dynamic_column_value_len(DYNAMIC_COLUMN_VALUE *value)
decimal_bin_size(precision, scale));
}
case DYN_COL_DATETIME:
- /* date+time in bits: 14 + 4 + 5 + 10 + 6 + 6 + 20 + 1 66bits ~= 9 bytes */
- return 9;
+ if (format == dyncol_fmt_num || value->x.time_value.second_part)
+ /* date+time in bits: 14 + 4 + 5 + 10 + 6 + 6 + 20 + 1 66bits ~= 9 bytes*/
+ return 9;
+ else
+ return 6;
case DYN_COL_DATE:
  /* date in bits: 14 + 4 + 5 = 23 bits ~= 3 bytes */
return 3;
case DYN_COL_TIME:
- /* time in bits: 10 + 6 + 6 + 20 + 1 = 43bits ~= 6bytes*/
- return 6;
+ if (format == dyncol_fmt_num || value->x.time_value.second_part)
+ /* time in bits: 10 + 6 + 6 + 20 + 1 = 43bits ~= 6bytes*/
+ return 6;
+ else
+ return 3;
+ case DYN_COL_DYNCOL:
+ return value->x.string.value.length;
}
DBUG_ASSERT(0);
return 0;
@@ -436,6 +1052,22 @@ dynamic_column_string_store(DYNAMIC_COLUMN *str, LEX_STRING *string,
return ER_DYNCOL_OK;
}
+/**
+ Append the string with given string value.
+
+ @param str the string where to put the value
+ @param val the value to put in the string
+
+ @return ER_DYNCOL_* return code
+*/
+
+static enum enum_dyncol_func_result
+dynamic_column_dyncol_store(DYNAMIC_COLUMN *str, LEX_STRING *string)
+{
+ if (dynstr_append_mem(str, string->str, string->length))
+ return ER_DYNCOL_RESOURCE;
+ return ER_DYNCOL_OK;
+}
/**
Read string value of given length from the packed string
@@ -464,6 +1096,26 @@ dynamic_column_string_read(DYNAMIC_COLUMN_VALUE *store_it_here,
return ER_DYNCOL_OK;
}
+/**
+ Read Dynamic columns packet string value of given length
+ from the packed string
+
+ @param store_it_here The structure to store the value
+ @param data The packed string which should be read
+  @param length          The length (in bytes) of the value in the string
+
+ @return ER_DYNCOL_* return code
+*/
+
+static enum enum_dyncol_func_result
+dynamic_column_dyncol_read(DYNAMIC_COLUMN_VALUE *store_it_here,
+ uchar *data, size_t length)
+{
+ store_it_here->x.string.charset= &my_charset_bin;
+ store_it_here->x.string.value.length= length;
+ store_it_here->x.string.value.str= (char*) data;
+ return ER_DYNCOL_OK;
+}
/**
Append the string with given decimal value.
@@ -506,7 +1158,7 @@ dynamic_column_decimal_store(DYNAMIC_COLUMN *str,
  @param value           The value structure which should be set up.
*/
-void dynamic_column_prepare_decimal(DYNAMIC_COLUMN_VALUE *value)
+void mariadb_dyncol_prepare_decimal(DYNAMIC_COLUMN_VALUE *value)
{
value->x.decimal.value.buf= value->x.decimal.buffer;
value->x.decimal.value.len= DECIMAL_BUFF_LENGTH;
@@ -515,6 +1167,12 @@ void dynamic_column_prepare_decimal(DYNAMIC_COLUMN_VALUE *value)
decimal_make_zero(&value->x.decimal.value);
}
+void dynamic_column_prepare_decimal(DYNAMIC_COLUMN_VALUE *value)
+{
+ mariadb_dyncol_prepare_decimal(value);
+}
+
+
/**
Read decimal value of given length from the string
@@ -570,7 +1228,8 @@ dynamic_column_decimal_read(DYNAMIC_COLUMN_VALUE *store_it_here,
*/
static enum enum_dyncol_func_result
-dynamic_column_date_time_store(DYNAMIC_COLUMN *str, MYSQL_TIME *value)
+dynamic_column_date_time_store(DYNAMIC_COLUMN *str, MYSQL_TIME *value,
+ enum enum_dyncol_format format)
{
enum enum_dyncol_func_result rc;
/*
@@ -579,7 +1238,7 @@ dynamic_column_date_time_store(DYNAMIC_COLUMN *str, MYSQL_TIME *value)
<123456><123456><123456><123456><123456><123456><123456><123456><123456>
*/
if ((rc= dynamic_column_date_store(str, value)) ||
- (rc= dynamic_column_time_store(str, value)))
+ (rc= dynamic_column_time_store(str, value, format)))
return rc;
return ER_DYNCOL_OK;
}
@@ -605,11 +1264,12 @@ dynamic_column_date_time_read(DYNAMIC_COLUMN_VALUE *store_it_here,
12345678901234123412345 1123456789012345612345612345678901234567890
<123456><123456><123456><123456><123456><123456><123456><123456><123456>
*/
- if (length != 9)
+ if (length != 9 && length != 6)
goto err;
store_it_here->x.time_value.time_type= MYSQL_TIMESTAMP_DATETIME;
if ((rc= dynamic_column_date_read_internal(store_it_here, data, 3)) ||
- (rc= dynamic_column_time_read_internal(store_it_here, data + 3, 6)))
+ (rc= dynamic_column_time_read_internal(store_it_here, data + 3,
+ length - 3)))
goto err;
return ER_DYNCOL_OK;
@@ -629,7 +1289,8 @@ err:
*/
static enum enum_dyncol_func_result
-dynamic_column_time_store(DYNAMIC_COLUMN *str, MYSQL_TIME *value)
+dynamic_column_time_store(DYNAMIC_COLUMN *str, MYSQL_TIME *value,
+ enum enum_dyncol_format format)
{
uchar *buf;
if (dynstr_realloc(str, 6))
@@ -651,19 +1312,35 @@ dynamic_column_time_store(DYNAMIC_COLUMN *str, MYSQL_TIME *value)
DBUG_ASSERT(value->minute <= 59);
DBUG_ASSERT(value->second <= 59);
DBUG_ASSERT(value->second_part <= 999999);
+ if (format == dyncol_fmt_num || value->second_part)
+ {
/*
00000!<-hours--><min-><sec-><---microseconds--->
1123456789012345612345612345678901234567890
<123456><123456><123456><123456><123456><123456>
*/
- buf[0]= (value->second_part & 0xff);
- buf[1]= ((value->second_part & 0xff00) >> 8);
- buf[2]= (uchar)(((value->second & 0xf) << 4) |
- ((value->second_part & 0xf0000) >> 16));
- buf[3]= ((value->minute << 2) | ((value->second & 0x30) >> 4));
- buf[4]= (value->hour & 0xff);
- buf[5]= ((value->neg ? 0x4 : 0) | (value->hour >> 8));
- str->length+= 6;
+ buf[0]= (value->second_part & 0xff);
+ buf[1]= ((value->second_part & 0xff00) >> 8);
+ buf[2]= (uchar)(((value->second & 0xf) << 4) |
+ ((value->second_part & 0xf0000) >> 16));
+ buf[3]= ((value->minute << 2) | ((value->second & 0x30) >> 4));
+ buf[4]= (value->hour & 0xff);
+ buf[5]= ((value->neg ? 0x4 : 0) | (value->hour >> 8));
+ str->length+= 6;
+ }
+ else
+ {
+ /*
+ !<-hours--><min-><sec->
+ 11234567890123456123456
+ <123456><123456><123456>
+ */
+ buf[0]= (value->second) | ((value->minute & 0x3) << 6);
+ buf[1]= (value->minute >> 2) | ((value->hour & 0xf) << 4);
+ buf[2]= (value->hour >> 4) | (value->neg ? 0x80 : 0);
+ str->length+= 3;
+ }
+
return ER_DYNCOL_OK;
}
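+
+/*
+  Editor's illustration (a sketch, not part of the original patch): the
+  compact 3-byte layout above packs second(6) | minute(6) | hour(10) |
+  neg(1), least significant bits first; 12:34:56 decodes back as follows.
+*/
+static inline void compact_time_example(void)
+{
+  uchar buf[3];
+  buf[0]= 56 | ((34 & 0x3) << 6);          /* hour=12, min=34, sec=56 */
+  buf[1]= (34 >> 2) | ((12 & 0xf) << 4);
+  buf[2]= (12 >> 4);                       /* neg flag (0x80) not set */
+  DBUG_ASSERT((buf[0] & 0x3f) == 56);
+  DBUG_ASSERT(((buf[0] >> 6) | ((buf[1] & 0xf) << 2)) == 34);
+  DBUG_ASSERT(((buf[1] >> 4) | ((buf[2] & 0x3f) << 4)) == 12);
+}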
@@ -702,21 +1379,37 @@ static enum enum_dyncol_func_result
dynamic_column_time_read_internal(DYNAMIC_COLUMN_VALUE *store_it_here,
uchar *data, size_t length)
{
- if (length != 6)
+ if (length != 6 && length != 3)
goto err;
- /*
- 00000!<-hours--><min-><sec-><---microseconds--->
- 1123456789012345612345612345678901234567890
- <123456><123456><123456><123456><123456><123456>
- */
- store_it_here->x.time_value.second_part= (data[0] |
- (data[1] << 8) |
- ((data[2] & 0xf) << 16));
- store_it_here->x.time_value.second= ((data[2] >> 4) |
- ((data[3] & 0x3) << 4));
- store_it_here->x.time_value.minute= (data[3] >> 2);
- store_it_here->x.time_value.hour= (((((uint)data[5]) & 0x3 ) << 8) | data[4]);
- store_it_here->x.time_value.neg= ((data[5] & 0x4) ? 1 : 0);
+ if (length == 6)
+ {
+ /*
+ 00000!<-hours--><min-><sec-><---microseconds--->
+ 1123456789012345612345612345678901234567890
+ <123456><123456><123456><123456><123456><123456>
+ */
+ store_it_here->x.time_value.second_part= (data[0] |
+ (data[1] << 8) |
+ ((data[2] & 0xf) << 16));
+ store_it_here->x.time_value.second= ((data[2] >> 4) |
+ ((data[3] & 0x3) << 4));
+ store_it_here->x.time_value.minute= (data[3] >> 2);
+ store_it_here->x.time_value.hour= (((((uint)data[5]) & 0x3 ) << 8) | data[4]);
+ store_it_here->x.time_value.neg= ((data[5] & 0x4) ? 1 : 0);
+ }
+ else
+ {
+ /*
+ !<-hours--><min-><sec->
+ 11234567890123456123456
+ <123456><123456><123456>
+ */
+ store_it_here->x.time_value.second_part= 0;
+ store_it_here->x.time_value.second= (data[0] & 0x3f);
+ store_it_here->x.time_value.minute= (data[0] >> 6) | ((data[1] & 0xf) << 2);
+ store_it_here->x.time_value.hour= (data[1] >> 4) | ((data[2] & 0x3f) << 4);
+ store_it_here->x.time_value.neg= ((data[2] & 0x80) ? 1 : 0);
+ }
if (store_it_here->x.time_value.second > 59 ||
store_it_here->x.time_value.minute > 59 ||
store_it_here->x.time_value.hour > 838 ||
@@ -841,7 +1534,8 @@ err:
*/
static enum enum_dyncol_func_result
-data_store(DYNAMIC_COLUMN *str, DYNAMIC_COLUMN_VALUE *value)
+data_store(DYNAMIC_COLUMN *str, DYNAMIC_COLUMN_VALUE *value,
+ enum enum_dyncol_format format)
{
switch (value->type) {
case DYN_COL_INT:
@@ -857,13 +1551,15 @@ data_store(DYNAMIC_COLUMN *str, DYNAMIC_COLUMN_VALUE *value)
return dynamic_column_decimal_store(str, &value->x.decimal.value);
case DYN_COL_DATETIME:
    /* date+time in bits: 14 + 4 + 5 + 10 + 6 + 6 + 20 + 1 = 66 bits ~= 9 bytes */
- return dynamic_column_date_time_store(str, &value->x.time_value);
+ return dynamic_column_date_time_store(str, &value->x.time_value, format);
case DYN_COL_DATE:
    /* date in bits: 14 + 4 + 5 = 23 bits ~= 3 bytes */
return dynamic_column_date_store(str, &value->x.time_value);
case DYN_COL_TIME:
    /* time in bits: 10 + 6 + 6 + 20 + 1 = 43 bits ~= 6 bytes */
- return dynamic_column_time_store(str, &value->x.time_value);
+ return dynamic_column_time_store(str, &value->x.time_value, format);
+ case DYN_COL_DYNCOL:
+ return dynamic_column_dyncol_store(str, &value->x.string.value);
case DYN_COL_NULL:
break; /* Impossible */
}
@@ -873,117 +1569,6 @@ data_store(DYNAMIC_COLUMN *str, DYNAMIC_COLUMN_VALUE *value)
/**
- Calculate length of offset field for given data length
-
- @param data_length Length of the data segment
-
- @return number of bytes
-*/
-
-static size_t dynamic_column_offset_bytes(size_t data_length)
-{
- if (data_length < 0x1f) /* all 1 value is reserved */
- return 1;
- if (data_length < 0x1fff) /* all 1 value is reserved */
- return 2;
- if (data_length < 0x1fffff) /* all 1 value is reserved */
- return 3;
- if (data_length < 0x1fffffff) /* all 1 value is reserved */
- return 4;
- return MAX_OFFSET_LENGTH; /* For future */
-}
-
-/**
- Store offset and type information in the given place
-
- @param place Beginning of the index entry
- @param offset_size Size of offset field in bytes
- @param type Type to be written
- @param offset Offset to be written
-*/
-
-static void type_and_offset_store(uchar *place, size_t offset_size,
- DYNAMIC_COLUMN_TYPE type,
- size_t offset)
-{
- ulong val = (((ulong) offset) << 3) | (type - 1);
- DBUG_ASSERT(type != DYN_COL_NULL);
- DBUG_ASSERT(((type - 1) & (~7)) == 0); /* fit in 3 bits */
-
- /* Index entry starts with column number; Jump over it */
- place+= COLUMN_NUMBER_SIZE;
-
- switch (offset_size) {
- case 1:
- DBUG_ASSERT(offset < 0x1f); /* all 1 value is reserved */
- place[0]= (uchar)val;
- break;
- case 2:
- DBUG_ASSERT(offset < 0x1fff); /* all 1 value is reserved */
- int2store(place, val);
- break;
- case 3:
- DBUG_ASSERT(offset < 0x1fffff); /* all 1 value is reserved */
- int3store(place, val);
- break;
- case 4:
- DBUG_ASSERT(offset < 0x1fffffff); /* all 1 value is reserved */
- int4store(place, val);
- break;
- default:
- DBUG_ASSERT(0); /* impossible */
- }
-}
-
-
-/**
- Read offset and type information from index entry
-
- @param type Where to put type info
- @param offset Where to put offset info
- @param place Beginning of the index entry
- @param offset_size Size of offset field in bytes
-*/
-
-static void type_and_offset_read(DYNAMIC_COLUMN_TYPE *type,
- size_t *offset,
- uchar *place, size_t offset_size)
-{
- ulong UNINIT_VAR(val);
-
- place+= COLUMN_NUMBER_SIZE; /* skip column number */
- switch (offset_size) {
- case 1:
- val= (ulong)place[0];
- break;
- case 2:
- val= uint2korr(place);
- break;
- case 3:
- val= uint3korr(place);
- break;
- case 4:
- val= uint4korr(place);
- break;
- default:
- DBUG_ASSERT(0); /* impossible */
- }
- *type= (val & 0x7) + 1;
- *offset= val >> 3;
-}
-
-
-/**
- Comparator function for references on column numbers for qsort
-*/
-
-static int column_sort(const void *a, const void *b)
-{
- return **((uint **)a) - **((uint **)b);
-}
-
-
-/**
Write information to the fixed header
@param str String where to write the header
@@ -996,34 +1581,20 @@ static void set_fixed_header(DYNAMIC_COLUMN *str,
uint column_count)
{
DBUG_ASSERT(column_count <= 0xffff);
- DBUG_ASSERT(offset_size <= 4);
+ DBUG_ASSERT(offset_size <= MAX_OFFSET_LENGTH);
str->str[0]= ((str->str[0] & ~DYNCOL_FLG_OFFSET) |
(offset_size - 1)); /* size of offset */
int2store(str->str + 1, column_count); /* columns number */
DBUG_ASSERT((str->str[0] & (~DYNCOL_FLG_KNOWN)) == 0);
}
-/*
- Calculate entry size (E) and header size (H) by offset size (O) and column
- count (C).
-*/
-
-#define calc_param(E,H,O,C) do { \
- (*(E))= (O) + COLUMN_NUMBER_SIZE; \
- (*(H))= (*(E)) * (C); \
-}while(0);
-
-
/**
Adds columns into the empty string
- @param str String where to write the data
- @param header_size Size of the header without fixed part
- @param offset_size Size of offset field in bytes
+ @param str String where to write the data (the record)
+ @param hdr Dynamic columns record descriptor
@param column_count Number of columns in the arrays
- @parem not_null_count Number of non-null columns in the arrays
- @param data_size Size of the data segment
- @param column_numbers Array of columns numbers
+ @param column_keys Array of columns keys (uint or LEX_STRING)
@param values Array of columns values
@param new_str True if we need to allocate new string
@@ -1032,42 +1603,54 @@ static void set_fixed_header(DYNAMIC_COLUMN *str,
static enum enum_dyncol_func_result
dynamic_new_column_store(DYNAMIC_COLUMN *str,
- size_t header_size,
- size_t offset_size,
+ DYN_HEADER *hdr,
uint column_count,
- uint not_null_count,
- size_t data_size,
- uint *column_numbers,
+ void *column_keys,
DYNAMIC_COLUMN_VALUE *values,
my_bool new_str)
{
- uchar *header_end;
- uint **columns_order;
+ struct st_service_funcs *fmt= fmt_data + hdr->format;
+ void **columns_order;
+ uchar *element;
uint i;
- uint entry_size= COLUMN_NUMBER_SIZE + offset_size;
enum enum_dyncol_func_result rc= ER_DYNCOL_RESOURCE;
+ size_t all_headers_size;
- if (!(columns_order= malloc(sizeof(uint*)*column_count)))
+ if (!(columns_order= malloc(sizeof(void*)*column_count)))
return ER_DYNCOL_RESOURCE;
if (new_str)
{
- if (dynamic_column_init_str(str,
- data_size + header_size + DYNCOL_SYZERESERVE))
+ if (dynamic_column_init_named(str,
+ fmt->fixed_hdr +
+ hdr->header_size +
+ hdr->nmpool_size +
+ hdr->data_size +
+ DYNCOL_SYZERESERVE))
goto err;
}
else
{
str->length= 0;
- if (dynstr_realloc(str, data_size + header_size + DYNCOL_SYZERESERVE))
+ if (dynstr_realloc(str,
+ fmt->fixed_hdr +
+ hdr->header_size +
+ hdr->nmpool_size +
+ hdr->data_size +
+ DYNCOL_SYZERESERVE))
goto err;
- bzero(str->str, FIXED_HEADER_SIZE);
- str->length= FIXED_HEADER_SIZE;
}
+  if (!column_count)
+  {
+    rc= ER_DYNCOL_OK;              /* free columns_order before returning */
+    goto err;
+  }
+
+ bzero(str->str, fmt->fixed_hdr);
+ str->length= fmt->fixed_hdr;
/* sort columns for the header */
- for (i= 0; i < column_count; i++)
- columns_order[i]= column_numbers + i;
- qsort(columns_order, (size_t)column_count, sizeof(uint*), &column_sort);
+ for (i= 0, element= (uchar *) column_keys;
+ i < column_count;
+ i++, element+= fmt->key_size_in_array)
+ columns_order[i]= (void *)element;
+ qsort(columns_order, (size_t)column_count, sizeof(void*), fmt->column_sort);
/*
For now we don't allow creating two columns with the same number
@@ -1076,38 +1659,43 @@ dynamic_new_column_store(DYNAMIC_COLUMN *str,
*/
for (i= 0; i < column_count - 1; i++)
{
- if (columns_order[i][0] > UINT_MAX16 ||
- columns_order[i][0] == columns_order[i + 1][0])
+ if ((*fmt->check_limit)(&columns_order[i]) ||
+ (*fmt->column_sort)(&columns_order[i], &columns_order[i + 1]) == 0)
{
rc= ER_DYNCOL_DATA;
goto err;
}
}
- if (columns_order[i][0] > UINT_MAX16)
+ if ((*fmt->check_limit)(&columns_order[i]))
{
rc= ER_DYNCOL_DATA;
goto err;
}
- DBUG_ASSERT(str->max_length >= str->length + header_size);
- set_fixed_header(str, offset_size, not_null_count);
- str->length+= header_size; /* reserve place for header */
- header_end= (uchar *)str->str + FIXED_HEADER_SIZE;
+ (*fmt->set_fixed_hdr)(str, hdr);
+ /* reserve place for header and name pool */
+ str->length+= hdr->header_size + hdr->nmpool_size;
+
+ hdr->entry= hdr->header;
+ hdr->name= hdr->nmpool;
+ all_headers_size= fmt->fixed_hdr + hdr->header_size + hdr->nmpool_size;
for (i= 0; i < column_count; i++)
{
- uint ord= columns_order[i] - column_numbers;
+ uint ord= (uint)(((uchar*)columns_order[i] - (uchar*)column_keys) /
+ fmt->key_size_in_array);
if (values[ord].type != DYN_COL_NULL)
{
/* Store header first in the str */
- int2store(header_end, column_numbers[ord]);
- type_and_offset_store(header_end, offset_size,
- values[ord].type,
- str->length - header_size - FIXED_HEADER_SIZE);
+ if ((*fmt->put_header_entry)(hdr, columns_order[i], values + ord,
+ str->length - all_headers_size))
+ {
+ rc= ER_DYNCOL_FORMAT;
+ goto err;
+ }
/* Store value in 'str + str->length' and increase str->length */
- if ((rc= data_store(str, values + ord)))
+ if ((rc= data_store(str, values + ord, hdr->format)))
goto err;
- header_end+= entry_size;
}
}
rc= ER_DYNCOL_OK;
@@ -1117,61 +1705,92 @@ err:
}
/**
- Create packed string which contains given columns (internal)
+ Calculate size of header, name pool and data pool
- @param str String where to write the data
+ @param hdr descriptor of dynamic column record
+ @param column_count number of elements in arrays
@param column_count Number of columns in the arrays
- @param column_numbers Array of columns numbers
+ @param column_keys Array of columns keys (uint or LEX_STRING)
@param values Array of columns values
- @param new_str True if we need allocate new string
@return ER_DYNCOL_* return code
*/
static enum enum_dyncol_func_result
-dynamic_column_create_many_internal(DYNAMIC_COLUMN *str,
- uint column_count,
- uint *column_numbers,
- DYNAMIC_COLUMN_VALUE *values,
- my_bool new_str)
-{
- size_t data_size= 0;
- size_t header_size, offset_size;
+calc_var_sizes(DYN_HEADER *hdr,
+ uint column_count,
+ void *column_keys,
+ DYNAMIC_COLUMN_VALUE *values)
+{
+ struct st_service_funcs *fmt= fmt_data + hdr->format;
uint i;
- int not_null_column_count= 0;
-
- if (new_str)
- {
- /* to make dynstr_free() working in case of errors */
- bzero(str, sizeof(DYNAMIC_COLUMN));
- }
-
+ hdr->nmpool_size= hdr->data_size= 0;
+ hdr->column_count= 0;
for (i= 0; i < column_count; i++)
{
if (values[i].type != DYN_COL_NULL)
{
size_t tmp;
- not_null_column_count++;
- data_size+= (tmp=dynamic_column_value_len(values + i));
+ hdr->column_count++;
+ hdr->data_size+= (tmp= dynamic_column_value_len(values + i,
+ hdr->format));
if (tmp == (size_t) ~0)
return ER_DYNCOL_DATA;
+ hdr->nmpool_size+= (*fmt->name_size)(column_keys, i);
}
}
-
- /* We can handle data up to 1fffffff = 536870911 bytes now */
- if ((offset_size= dynamic_column_offset_bytes(data_size)) >=
- MAX_OFFSET_LENGTH)
+ /*
+ We can handle data up to 0x1fffffff (old format) and
+ 0xfffffffff (new format) bytes now.
+ */
+ if ((hdr->offset_size= fmt->dynamic_column_offset_bytes(hdr->data_size)) >=
+ fmt->max_offset_size)
return ER_DYNCOL_LIMIT;
- /* header entry is column number + offset & type */
- header_size= not_null_column_count * (offset_size + 2);
+ /* header entry is column number or string pointer + offset & type */
+ hdr->entry_size= fmt->fixed_hdr_entry + hdr->offset_size;
+ hdr->header_size= hdr->column_count * hdr->entry_size;
+ return ER_DYNCOL_OK;
+}
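+
+/*
+  Editor's illustration (a sketch, not part of the original patch): once
+  calc_var_sizes() has run, the full record size is the plain sum of the
+  four parts, which is exactly what dynamic_new_column_store() reserves.
+*/
+static inline size_t record_size_example(DYN_HEADER *hdr)
+{
+  return fmt_data[hdr->format].fixed_hdr +
+         hdr->header_size + hdr->nmpool_size + hdr->data_size;
+}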
+
+/**
+ Create packed string which contains given columns (internal multi format)
+
+ @param str String where to write the data
+ @param column_count Number of columns in the arrays
+ @param column_keys Array of columns keys (format dependent)
+ @param values Array of columns values
+  @param new_str         True if we need to allocate a new string
+ @param string_keys keys are strings
+
+ @return ER_DYNCOL_* return code
+*/
+
+static enum enum_dyncol_func_result
+dynamic_column_create_many_internal_fmt(DYNAMIC_COLUMN *str,
+ uint column_count,
+ void *column_keys,
+ DYNAMIC_COLUMN_VALUE *values,
+ my_bool new_str,
+ my_bool string_keys)
+{
+ DYN_HEADER header;
+ enum enum_dyncol_func_result rc;
+ bzero(&header, sizeof(header));
+ header.format= (string_keys ? 1 : 0);
- return dynamic_new_column_store(str,
- header_size, offset_size,
+ if (new_str)
+ {
+ /* to make dynstr_free() working in case of errors */
+ bzero(str, sizeof(DYNAMIC_COLUMN));
+ }
+
+ if ((rc= calc_var_sizes(&header, column_count, column_keys, values)) < 0)
+ return rc;
+
+ return dynamic_new_column_store(str, &header,
column_count,
- not_null_column_count,
- data_size,
- column_numbers, values,
+ column_keys, values,
new_str);
}
@@ -1194,11 +1813,60 @@ dynamic_column_create_many(DYNAMIC_COLUMN *str,
DYNAMIC_COLUMN_VALUE *values)
{
DBUG_ENTER("dynamic_column_create_many");
- DBUG_RETURN(dynamic_column_create_many_internal(str, column_count,
- column_numbers, values,
- TRUE));
+ DBUG_RETURN(dynamic_column_create_many_internal_fmt(str, column_count,
+ column_numbers, values,
+ TRUE, FALSE));
+}
+
+/**
+ Create packed string which contains given columns
+
+ @param str String where to write the data
+ @param column_count Number of columns in the arrays
+ @param column_numbers Array of columns numbers
+ @param values Array of columns values
+  @param new_string      True if we need to allocate a new string
+
+ @return ER_DYNCOL_* return code
+*/
+
+enum enum_dyncol_func_result
+mariadb_dyncol_create_many(DYNAMIC_COLUMN *str,
+ uint column_count,
+ uint *column_numbers,
+ DYNAMIC_COLUMN_VALUE *values,
+ my_bool new_string)
+{
+ DBUG_ENTER("mariadb_dyncol_create_many");
+ DBUG_RETURN(dynamic_column_create_many_internal_fmt(str, column_count,
+ column_numbers, values,
+ new_string, FALSE));
}
+/**
+ Create packed string which contains given columns
+
+ @param str String where to write the data
+ @param column_count Number of columns in the arrays
+ @param column_keys Array of columns keys
+  @param values          Array of column values
+  @param new_string      True if we need to allocate a new string
+
+ @return ER_DYNCOL_* return code
+*/
+
+enum enum_dyncol_func_result
+mariadb_dyncol_create_many_named(DYNAMIC_COLUMN *str,
+ uint column_count,
+ LEX_STRING *column_keys,
+ DYNAMIC_COLUMN_VALUE *values,
+ my_bool new_string)
+{
+ DBUG_ENTER("mariadb_dyncol_create_many_named");
+ DBUG_RETURN(dynamic_column_create_many_internal_fmt(str, column_count,
+ column_keys, values,
+ new_string, TRUE));
+}
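+
+/*
+  Editor's usage sketch (not part of the original patch).  Assumptions:
+  DYNAMIC_COLUMN is a DYNAMIC_STRING (as the dynstr_* calls in this file
+  imply) and the unsigned integer member of the value union is named
+  x.ulong_value as in ma_dyncol.h.
+*/
+static inline void create_named_example(void)
+{
+  DYNAMIC_COLUMN rec;
+  LEX_STRING key= { (char *) "weight", 6 };
+  DYNAMIC_COLUMN_VALUE val;
+  val.type= DYN_COL_UINT;
+  val.x.ulong_value= 42;
+  if (mariadb_dyncol_create_many_named(&rec, 1, &key, &val, TRUE) ==
+      ER_DYNCOL_OK)
+    dynstr_free(&rec);
+}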
/**
Create packed string which contains given column
@@ -1239,32 +1907,48 @@ static size_t get_length_interval(uchar *entry, uchar *entry_next,
DYNAMIC_COLUMN_TYPE type, type_next;
DBUG_ASSERT(entry < entry_next);
- type_and_offset_read(&type, &offset, entry, offset_size);
+ if (type_and_offset_read_num(&type, &offset, entry + COLUMN_NUMBER_SIZE,
+ offset_size))
+ return DYNCOL_OFFSET_ERROR;
if (entry_next >= header_end)
return (last_offset - offset);
- type_and_offset_read(&type_next, &offset_next, entry_next, offset_size);
+ if (type_and_offset_read_num(&type_next, &offset_next,
+ entry_next + COLUMN_NUMBER_SIZE, offset_size))
+ return DYNCOL_OFFSET_ERROR;
return (offset_next - offset);
}
-/*
- Calculate length of data of one column
+/**
+ Calculate length of data between given hdr->entry and next_entry
- @param entry Pointer to the first entry
- @param header_end Pointer to the header end
- @param offset_size Size of offset field in bytes
- @param last_offset Size of the data segment
+ @param hdr descriptor of dynamic column record
+ @param next_entry next header entry (can point just after last header
+ entry)
@return number of bytes
*/
-static size_t get_length(uchar *entry, uchar *header_end,
- size_t offset_size,
- size_t last_offset)
+static size_t hdr_interval_length(DYN_HEADER *hdr, uchar *next_entry)
{
- return get_length_interval(entry,
- entry + offset_size + COLUMN_NUMBER_SIZE,
- header_end, offset_size, last_offset);
+ struct st_service_funcs *fmt= fmt_data + hdr->format;
+ size_t next_entry_offset;
+ DYNAMIC_COLUMN_TYPE next_entry_type;
+ DBUG_ASSERT(hdr->entry < next_entry);
+ DBUG_ASSERT(hdr->entry >= hdr->header);
+ DBUG_ASSERT(next_entry <= hdr->header + hdr->header_size);
+
+ if ((*fmt->type_and_offset_read)(&hdr->type, &hdr->offset,
+ hdr->entry + fmt->fixed_hdr_entry,
+ hdr->offset_size))
+ return DYNCOL_OFFSET_ERROR;
+ if (next_entry == hdr->header + hdr->header_size)
+ return hdr->data_size - hdr->offset;
+ if ((*fmt->type_and_offset_read)(&next_entry_type, &next_entry_offset,
+ next_entry + fmt->fixed_hdr_entry,
+ hdr->offset_size))
+ return DYNCOL_OFFSET_ERROR;
+ return (next_entry_offset - hdr->offset);
}
@@ -1272,7 +1956,7 @@ static size_t get_length(uchar *entry, uchar *header_end,
Comparator function for references to header entries for qsort
*/
-static int header_compar(const void *a, const void *b)
+static int header_compar_num(const void *a, const void *b)
{
uint va= uint2korr((uchar*)a), vb= uint2korr((uchar*)b);
return (va > vb ? 1 : (va < vb ? -1 : 0));
@@ -1280,70 +1964,183 @@ static int header_compar(const void *a, const void *b)
/**
+ Find entry in the numeric format header by the column number
+
+ @param hdr descriptor of dynamic column record
+ @param key number to find
+
+ @return pointer to the entry or NULL
+*/
+
+static uchar *find_entry_num(DYN_HEADER *hdr, uint key)
+{
+ uchar header_entry[2+4];
+ DBUG_ASSERT(hdr->format == dyncol_fmt_num);
+ int2store(header_entry, key);
+ return hdr->entry= bsearch(header_entry, hdr->header,
+ (size_t)hdr->column_count,
+ hdr->entry_size, &header_compar_num);
+}
+
+
+/**
+ Read name from header entry
+
+ @param hdr descriptor of dynamic column record
+ @param entry pointer to the header entry
+ @param name where to put name
+
+ @return 0 ok
+ @return 1 error in data
+*/
+
+static my_bool read_name(DYN_HEADER *hdr, uchar *entry, LEX_STRING *name)
+{
+ size_t nmoffset= uint2korr(entry);
+ uchar *next_entry= entry + hdr->entry_size;
+
+ if (nmoffset > hdr->nmpool_size)
+ return 1;
+
+ name->str= (char *)hdr->nmpool + nmoffset;
+ if (next_entry == hdr->header + hdr->header_size)
+ name->length= hdr->nmpool_size - nmoffset;
+ else
+ {
+ size_t next_nmoffset= uint2korr(next_entry);
+ if (next_nmoffset > hdr->nmpool_size)
+ return 1;
+ name->length= next_nmoffset - nmoffset;
+ }
+ return 0;
+}
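+
+/*
+  Editor's illustration (a sketch, not part of the original patch): name
+  lengths are implicit, recovered as the distance to the next entry's name
+  offset (or to the pool end for the last entry).  For brevity the fake
+  entries below hold only the 2-byte name offset (entry_size == 2).
+*/
+static inline void read_name_example(void)
+{
+  uchar entries[2][2];
+  DYN_HEADER hdr;
+  LEX_STRING name;
+  bzero(&hdr, sizeof(hdr));
+  hdr.entry_size= 2;
+  hdr.header= entries[0];
+  hdr.header_size= sizeof(entries);
+  hdr.nmpool= (uchar *) "abcd";
+  hdr.nmpool_size= 4;
+  int2store(entries[0], 0);                /* first name at offset 0 */
+  int2store(entries[1], 2);                /* second name at offset 2 */
+  if (!read_name(&hdr, entries[0], &name))
+    DBUG_ASSERT(name.length == 2 && name.str[0] == 'a');   /* "ab" */
+}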
+
+
+/**
+  Find entry in the names format header by the column name
+
+ @param hdr descriptor of dynamic column record
+ @param key name to find
+
+ @return pointer to the entry or NULL
+*/
+static uchar *find_entry_named(DYN_HEADER *hdr, LEX_STRING *key)
+{
+ uchar *min= hdr->header;
+ uchar *max= hdr->header + (hdr->column_count - 1) * hdr->entry_size;
+ uchar *mid;
+ DBUG_ASSERT(hdr->format == dyncol_fmt_str);
+ DBUG_ASSERT(hdr->nmpool != NULL);
+ while (max >= min)
+ {
+ LEX_STRING name;
+ int cmp;
+ mid= hdr->header + ((min - hdr->header) +
+ (max - hdr->header)) /
+ 2 /
+ hdr->entry_size * hdr->entry_size;
+ if (read_name(hdr, mid, &name))
+ return NULL;
+ cmp= mariadb_dyncol_column_cmp_named(&name, key);
+ if (cmp < 0)
+ min= mid + hdr->entry_size;
+ else if (cmp > 0)
+ max= mid - hdr->entry_size;
+ else
+ return mid;
+ }
+ return NULL;
+}
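+
+/*
+  Editor's note (a sketch, not part of the original patch): the
+  divide-then-multiply by entry_size in the midpoint computation above
+  rounds the byte midpoint down to an entry boundary.
+*/
+static inline void mid_rounding_example(void)
+{
+  size_t entry_size= 5, raw_mid= 12;
+  DBUG_ASSERT(raw_mid / entry_size * entry_size == 10);
+}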
+
+
+/**
+ Write number in the buffer (backward direction - starts from the buffer end)
+
+  @return pointer to the beginning of the number
+*/
+
+static char *backwritenum(char *chr, uint numkey)
+{
+ if (numkey == 0)
+ *(--chr)= '0';
+ else
+ while (numkey > 0)
+ {
+ *(--chr)= '0' + numkey % 10;
+ numkey/= 10;
+ }
+ return chr;
+}
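+
+/*
+  Editor's illustration (a sketch, not part of the original patch):
+  backwritenum() fills the buffer from its end, so the caller keeps the
+  returned pointer and measures the length against the buffer end.
+*/
+static inline void backwritenum_example(void)
+{
+  char buff[DYNCOL_NUM_CHAR];
+  char *begin= backwritenum(buff + sizeof(buff), 4711);
+  DBUG_ASSERT((buff + sizeof(buff)) - begin == 4);
+  DBUG_ASSERT(begin[0] == '4' && begin[3] == '1');
+}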
+
+
+/**
Find column and fill information about it
- @param type Returns type of the column
- @param data Returns a pointer to the data
- @param length Returns length of the data
- @param offset_size Size of offset field in bytes
- @param column_count Number of column in the packed string
- @param data_end Pointer to the data end
- @param num Number of the column we want to fetch
- @param entry_pos NULL or place where to put reference to the entry
+ @param hdr descriptor of dynamic column record
+ @param numkey Number of the column to fetch (if strkey is NULL)
+ @param strkey Name of the column to fetch (or NULL)
@return 0 ok
@return 1 error in data
*/
static my_bool
-find_column(DYNAMIC_COLUMN_TYPE *type, uchar **data, size_t *length,
- uchar *header, size_t offset_size, uint column_count,
- uchar *data_end, uint num, uchar **entry_pos)
+find_column(DYN_HEADER *hdr, uint numkey, LEX_STRING *strkey)
{
- uchar *entry;
- size_t offset, total_data, header_size, entry_size;
- uchar key[2+4];
+ LEX_STRING nmkey;
+ char nmkeybuff[DYNCOL_NUM_CHAR]; /* to fit max 2 bytes number */
+ DBUG_ASSERT(hdr->header != NULL);
- if (!entry_pos)
- entry_pos= &entry;
-
- calc_param(&entry_size, &header_size, offset_size, column_count);
+ if (hdr->header + hdr->header_size > hdr->data_end)
+ return TRUE;
- if (header + header_size > data_end)
- return 1;
+ /* fix key */
+ if (hdr->format == dyncol_fmt_num && strkey != NULL)
+ {
+ char *end;
+ numkey= (uint) strtoul(strkey->str, &end, 10);
+ if (end != strkey->str + strkey->length)
+ {
+      /* we can't find a non-numeric key among numeric ones */
+ hdr->type= DYN_COL_NULL;
+ return 0;
+ }
+ }
+ else if (hdr->format == dyncol_fmt_str && strkey == NULL)
+ {
+ nmkey.str= backwritenum(nmkeybuff + sizeof(nmkeybuff), numkey);
+ nmkey.length= (nmkeybuff + sizeof(nmkeybuff)) - nmkey.str;
+ strkey= &nmkey;
+ }
+ if (hdr->format == dyncol_fmt_num)
+ hdr->entry= find_entry_num(hdr, numkey);
+ else
+ hdr->entry= find_entry_named(hdr, strkey);
- int2store(key, num);
- entry= bsearch(key, header, (size_t)column_count, entry_size,
- &header_compar);
- if (!entry)
+ if (!hdr->entry)
{
/* Column not found */
- *type= DYN_COL_NULL;
- *entry_pos= NULL;
+ hdr->type= DYN_COL_NULL;
return 0;
}
- type_and_offset_read(type, &offset, entry, offset_size);
- total_data= data_end - (header + header_size);
- if (offset > total_data)
- return 1;
- *data= header + header_size + offset;
- *length= get_length(entry, header + header_size, offset_size,
- total_data);
+ hdr->length= hdr_interval_length(hdr, hdr->entry + hdr->entry_size);
+ hdr->data= hdr->dtpool + hdr->offset;
/*
    Check that the found data is within the valid range. It can be out of
    range if we got data with wrong offsets.
*/
- if ((long) *length < 0 || offset + *length > total_data)
+ if (hdr->length == DYNCOL_OFFSET_ERROR ||
+ hdr->length > INT_MAX || hdr->offset > hdr->data_size)
return 1;
- *entry_pos= entry;
return 0;
}
/**
- Read and check the header of the dynamic string
+ Read and check the header of the dynamic string
+ @param hdr descriptor of dynamic column record
@param str Dynamic string
@retval FALSE OK
@@ -1354,22 +2151,31 @@ find_column(DYNAMIC_COLUMN_TYPE *type, uchar **data, size_t *length,
already have handled this case.
*/
-static inline my_bool read_fixed_header(DYNAMIC_COLUMN *str,
- size_t *offset_size,
- uint *column_count)
+static inline my_bool read_fixed_header(DYN_HEADER *hdr,
+ DYNAMIC_COLUMN *str)
{
DBUG_ASSERT(str != NULL && str->length != 0);
- if ((str->length < FIXED_HEADER_SIZE) ||
+ if ((str->length < 1) ||
(str->str[0] & (~DYNCOL_FLG_KNOWN)))
+ return 1;
+ hdr->format= ((str->str[0] & DYNCOL_FLG_NAMES) ?
+ dyncol_fmt_str:
+ dyncol_fmt_num);
+ if ((str->length < fmt_data[hdr->format].fixed_hdr))
return 1; /* Wrong header */
- *offset_size= (str->str[0] & DYNCOL_FLG_OFFSET) + 1;
- *column_count= uint2korr(str->str + 1);
+ hdr->offset_size= (str->str[0] & DYNCOL_FLG_OFFSET) + 1 +
+ (hdr->format == dyncol_fmt_str ? 1 : 0);
+ hdr->column_count= uint2korr(str->str + 1);
+ if (hdr->format == dyncol_fmt_str)
+ hdr->nmpool_size= uint2korr(str->str + 3); // only 2 bytes supported for now
+ else
+ hdr->nmpool_size= 0;
return 0;
}
/**
- Get dynamic column value
+ Get dynamic column value by column number
@param str The packed string to extract the column
@param column_nr Number of column to fetch
@@ -1378,258 +2184,236 @@ static inline my_bool read_fixed_header(DYNAMIC_COLUMN *str,
@return ER_DYNCOL_* return code
*/
-int dynamic_column_get(DYNAMIC_COLUMN *str, uint column_nr,
+enum enum_dyncol_func_result
+dynamic_column_get(DYNAMIC_COLUMN *str, uint column_nr,
DYNAMIC_COLUMN_VALUE *store_it_here)
{
- uchar *data;
- size_t offset_size, length;
- uint column_count;
- enum enum_dyncol_func_result rc= ER_DYNCOL_FORMAT;
+ return dynamic_column_get_internal(str, store_it_here, column_nr, NULL);
+}
- if (str->length == 0)
- goto null;
+enum enum_dyncol_func_result
+mariadb_dyncol_get(DYNAMIC_COLUMN *str, uint column_nr,
+ DYNAMIC_COLUMN_VALUE *store_it_here)
+{
+ return dynamic_column_get_internal(str, store_it_here, column_nr, NULL);
+}
- if (read_fixed_header(str, &offset_size, &column_count))
- goto err;
- if (column_count == 0)
- goto null;
+/**
+ Get dynamic column value by name
- if (find_column(&store_it_here->type, &data, &length,
- (uchar*)str->str + FIXED_HEADER_SIZE,
- offset_size, column_count, (uchar*)str->str + str->length,
- column_nr, NULL))
- goto err;
+ @param str The packed string to extract the column
+ @param name Name of column to fetch
+ @param store_it_here Where to store the extracted value
+
+ @return ER_DYNCOL_* return code
+*/
+
+enum enum_dyncol_func_result
+mariadb_dyncol_get_named(DYNAMIC_COLUMN *str, LEX_STRING *name,
+ DYNAMIC_COLUMN_VALUE *store_it_here)
+{
+ DBUG_ASSERT(name != NULL);
+ return dynamic_column_get_internal(str, store_it_here, 0, name);
+}
- switch (store_it_here->type) {
+
+static enum enum_dyncol_func_result
+dynamic_column_get_value(DYN_HEADER *hdr, DYNAMIC_COLUMN_VALUE *store_it_here)
+{
+  enum enum_dyncol_func_result rc;
+ switch ((store_it_here->type= hdr->type)) {
case DYN_COL_INT:
- rc= dynamic_column_sint_read(store_it_here, data, length);
+ rc= dynamic_column_sint_read(store_it_here, hdr->data, hdr->length);
break;
case DYN_COL_UINT:
- rc= dynamic_column_uint_read(store_it_here, data, length);
+ rc= dynamic_column_uint_read(store_it_here, hdr->data, hdr->length);
break;
case DYN_COL_DOUBLE:
- rc= dynamic_column_double_read(store_it_here, data, length);
+ rc= dynamic_column_double_read(store_it_here, hdr->data, hdr->length);
break;
case DYN_COL_STRING:
- rc= dynamic_column_string_read(store_it_here, data, length);
+ rc= dynamic_column_string_read(store_it_here, hdr->data, hdr->length);
break;
case DYN_COL_DECIMAL:
- rc= dynamic_column_decimal_read(store_it_here, data, length);
+ rc= dynamic_column_decimal_read(store_it_here, hdr->data, hdr->length);
break;
case DYN_COL_DATETIME:
- rc= dynamic_column_date_time_read(store_it_here, data, length);
+ rc= dynamic_column_date_time_read(store_it_here, hdr->data,
+ hdr->length);
break;
case DYN_COL_DATE:
- rc= dynamic_column_date_read(store_it_here, data, length);
+ rc= dynamic_column_date_read(store_it_here, hdr->data, hdr->length);
break;
case DYN_COL_TIME:
- rc= dynamic_column_time_read(store_it_here, data, length);
+ rc= dynamic_column_time_read(store_it_here, hdr->data, hdr->length);
break;
case DYN_COL_NULL:
rc= ER_DYNCOL_OK;
break;
+ case DYN_COL_DYNCOL:
+ rc= dynamic_column_dyncol_read(store_it_here, hdr->data, hdr->length);
+ break;
default:
- goto err;
+ rc= ER_DYNCOL_FORMAT;
+ store_it_here->type= DYN_COL_NULL;
+ break;
}
return rc;
-
-null:
- rc= ER_DYNCOL_OK;
-err:
- store_it_here->type= DYN_COL_NULL;
- return rc;
}
/**
- Delete column with given number from the packed string
+ Get dynamic column value by number or name
- @param str The packed string to delete the column
- @param column_nr Number of column to delete
+ @param str The packed string to extract the column
+ @param store_it_here Where to store the extracted value
+  @param num_key         Number of the column to fetch (if str_key is NULL)
+  @param str_key         Name of the column to fetch (or NULL)
@return ER_DYNCOL_* return code
*/
-int dynamic_column_delete(DYNAMIC_COLUMN *str, uint column_nr)
+static enum enum_dyncol_func_result
+dynamic_column_get_internal(DYNAMIC_COLUMN *str,
+ DYNAMIC_COLUMN_VALUE *store_it_here,
+ uint num_key, LEX_STRING *str_key)
{
- uchar *data, *header_entry, *read, *write;
- size_t offset_size, new_offset_size, length, entry_size, new_entry_size,
- header_size, new_header_size, data_size, new_data_size,
- deleted_entry_offset;
- uint column_count, i;
- DYNAMIC_COLUMN_TYPE type;
+ DYN_HEADER header;
+ enum enum_dyncol_func_result rc= ER_DYNCOL_FORMAT;
+ bzero(&header, sizeof(header));
if (str->length == 0)
- return ER_DYNCOL_OK; /* no columns */
+ goto null;
- if (read_fixed_header(str, &offset_size, &column_count))
- return ER_DYNCOL_FORMAT;
+ if ((rc= init_read_hdr(&header, str)) < 0)
+ goto err;
- if (column_count == 0)
- {
- str->length= 0;
- return ER_DYNCOL_OK; /* no columns */
- }
+ if (header.column_count == 0)
+ goto null;
- if (find_column(&type, &data, &length, (uchar*)str->str + FIXED_HEADER_SIZE,
- offset_size, column_count, (uchar*)str->str + str->length,
- column_nr, &header_entry))
- return ER_DYNCOL_FORMAT;
+ if (find_column(&header, num_key, str_key))
+ goto err;
- if (type == DYN_COL_NULL)
- return ER_DYNCOL_OK; /* no such column */
+ rc= dynamic_column_get_value(&header, store_it_here);
+ return rc;
- if (column_count == 1)
- {
- /* delete the only column; Return empty string */
- str->length= 0;
- return ER_DYNCOL_OK;
- }
+null:
+ rc= ER_DYNCOL_OK;
+err:
+ store_it_here->type= DYN_COL_NULL;
+ return rc;
+}
- /* Calculate entry_size and header_size */
- calc_param(&entry_size, &header_size, offset_size, column_count);
- data_size= str->length - FIXED_HEADER_SIZE - header_size;
- new_data_size= data_size - length;
- if ((new_offset_size= dynamic_column_offset_bytes(new_data_size)) >=
- MAX_OFFSET_LENGTH)
- return ER_DYNCOL_LIMIT;
- DBUG_ASSERT(new_offset_size <= offset_size);
+/**
+ Check existence of the column in the packed string (by number)
- calc_param(&new_entry_size, &new_header_size,
- new_offset_size, column_count - 1);
+ @param str The packed string to check the column
+ @param column_nr Number of column to check
- deleted_entry_offset= ((data - (uchar*) str->str) -
- header_size - FIXED_HEADER_SIZE);
+ @return ER_DYNCOL_* return code
+*/
- /* rewrite header*/
- set_fixed_header(str, new_offset_size, column_count - 1);
- for (i= 0, write= read= (uchar *)str->str + FIXED_HEADER_SIZE;
- i < column_count;
- i++, read+= entry_size, write+= new_entry_size)
- {
- size_t offs;
- uint nm;
- DYNAMIC_COLUMN_TYPE tp;
- if (read == header_entry)
- {
-#ifndef DBUG_OFF
- nm= uint2korr(read);
- type_and_offset_read(&tp, &offs, read,
- offset_size);
- DBUG_ASSERT(nm == column_nr);
- DBUG_ASSERT(offs == deleted_entry_offset);
-#endif
- write-= new_entry_size; /* do not move writer */
- continue; /* skip removed field */
- }
+enum enum_dyncol_func_result
+dynamic_column_exists(DYNAMIC_COLUMN *str, uint column_nr)
+{
+ return dynamic_column_exists_internal(str, column_nr, NULL);
+}
- nm= uint2korr(read),
- type_and_offset_read(&tp, &offs, read,
- offset_size);
+enum enum_dyncol_func_result
+mariadb_dyncol_exists(DYNAMIC_COLUMN *str, uint column_nr)
+{
+ return dynamic_column_exists_internal(str, column_nr, NULL);
+}
- if (offs > deleted_entry_offset)
- offs-= length; /* data stored after removed data */
+/**
+ Check existence of the column in the packed string (by name)
- int2store(write, nm);
- type_and_offset_store(write, new_offset_size, tp, offs);
- }
+ @param str The packed string to check the column
+ @param name Name of column to check
- /* move data */
- {
- size_t first_chunk_len= ((data - (uchar *)str->str) -
- FIXED_HEADER_SIZE - header_size);
- size_t second_chunk_len= new_data_size - first_chunk_len;
- if (first_chunk_len)
- memmove(str->str + FIXED_HEADER_SIZE + new_header_size,
- str->str + FIXED_HEADER_SIZE + header_size,
- first_chunk_len);
- if (second_chunk_len)
- memmove(str->str +
- FIXED_HEADER_SIZE + new_header_size + first_chunk_len,
- str->str +
- FIXED_HEADER_SIZE + header_size + first_chunk_len + length,
- second_chunk_len);
- }
-
- /* fix str length */
- DBUG_ASSERT(str->length >=
- FIXED_HEADER_SIZE + new_header_size + new_data_size);
- str->length= FIXED_HEADER_SIZE + new_header_size + new_data_size;
+ @return ER_DYNCOL_* return code
+*/
- return ER_DYNCOL_OK;
+enum enum_dyncol_func_result
+mariadb_dyncol_exists_named(DYNAMIC_COLUMN *str, LEX_STRING *name)
+{
+ DBUG_ASSERT(name != NULL);
+ return dynamic_column_exists_internal(str, 0, name);
}
/**
- Check existence of the column in the packed string
+  Check existence of the column in the packed string (by name or number)
@param str The packed string to check the column
- @param column_nr Number of column to check
+  @param num_key         Number of the column to fetch (if str_key is NULL)
+  @param str_key         Name of the column to fetch (or NULL)
@return ER_DYNCOL_* return code
*/
-enum enum_dyncol_func_result
-dynamic_column_exists(DYNAMIC_COLUMN *str, uint column_nr)
+static enum enum_dyncol_func_result
+dynamic_column_exists_internal(DYNAMIC_COLUMN *str, uint num_key,
+ LEX_STRING *str_key)
{
- uchar *data;
- size_t offset_size, length;
- uint column_count;
- DYNAMIC_COLUMN_TYPE type;
+ DYN_HEADER header;
+ enum enum_dyncol_func_result rc;
+ bzero(&header, sizeof(header));
if (str->length == 0)
return ER_DYNCOL_NO; /* no columns */
- if (read_fixed_header(str, &offset_size, &column_count))
- return ER_DYNCOL_FORMAT;
+ if ((rc= init_read_hdr(&header, str)) < 0)
+ return rc;
- if (column_count == 0)
+ if (header.column_count == 0)
return ER_DYNCOL_NO; /* no columns */
- if (find_column(&type, &data, &length, (uchar*)str->str + FIXED_HEADER_SIZE,
- offset_size, column_count, (uchar*)str->str + str->length,
- column_nr, NULL))
+ if (find_column(&header, num_key, str_key))
return ER_DYNCOL_FORMAT;
- return (type != DYN_COL_NULL ? ER_DYNCOL_YES : ER_DYNCOL_NO);
+ return (header.type != DYN_COL_NULL ? ER_DYNCOL_YES : ER_DYNCOL_NO);
}
/**
- List not-null columns in the packed string
+ List not-null columns in the packed string (only numeric format)
@param str The packed string
@param array_of_uint Where to put reference on created array
@return ER_DYNCOL_* return code
*/
-
enum enum_dyncol_func_result
dynamic_column_list(DYNAMIC_COLUMN *str, DYNAMIC_ARRAY *array_of_uint)
{
+ DYN_HEADER header;
uchar *read;
- size_t offset_size, entry_size;
- uint column_count, i;
+ uint i;
+ enum enum_dyncol_func_result rc;
bzero(array_of_uint, sizeof(*array_of_uint)); /* In case of errors */
if (str->length == 0)
return ER_DYNCOL_OK; /* no columns */
- if (read_fixed_header(str, &offset_size, &column_count))
- return ER_DYNCOL_FORMAT;
+ if ((rc= init_read_hdr(&header, str)) < 0)
+ return rc;
- entry_size= COLUMN_NUMBER_SIZE + offset_size;
+ if (header.format != dyncol_fmt_num)
+ return ER_DYNCOL_FORMAT;
- if (entry_size * column_count + FIXED_HEADER_SIZE > str->length)
+ if (header.entry_size * header.column_count + FIXED_HEADER_SIZE >
+ str->length)
return ER_DYNCOL_FORMAT;
- if (init_dynamic_array(array_of_uint, sizeof(uint), column_count, 0))
+ if (my_init_dynamic_array(array_of_uint, sizeof(uint), header.column_count,
+ 0, MYF(0)))
return ER_DYNCOL_RESOURCE;
- for (i= 0, read= (uchar *)str->str + FIXED_HEADER_SIZE;
- i < column_count;
- i++, read+= entry_size)
+ for (i= 0, read= header.header;
+ i < header.column_count;
+ i++, read+= header.entry_size)
{
uint nm= uint2korr(read);
    /* Insert can never fail as the array is pre-allocated above */
@@ -1638,73 +2422,247 @@ dynamic_column_list(DYNAMIC_COLUMN *str, DYNAMIC_ARRAY *array_of_uint)
return ER_DYNCOL_OK;
}
+/**
+ List not-null columns in the packed string (only numeric format)
+
+ @param str The packed string
+  @param count           Where to put the number of columns
+  @param nums            Where to put the allocated array of column numbers
+
+ @return ER_DYNCOL_* return code
+*/
+enum enum_dyncol_func_result
+mariadb_dyncol_list(DYNAMIC_COLUMN *str, uint *count, uint **nums)
+{
+ DYN_HEADER header;
+ uchar *read;
+ uint i;
+ enum enum_dyncol_func_result rc;
+
+ (*nums)= 0; /* In case of errors */
+ if (str->length == 0)
+ return ER_DYNCOL_OK; /* no columns */
+
+ if ((rc= init_read_hdr(&header, str)) < 0)
+ return rc;
+
+ if (header.format != dyncol_fmt_num)
+ return ER_DYNCOL_FORMAT;
+
+ if (header.entry_size * header.column_count + FIXED_HEADER_SIZE >
+ str->length)
+ return ER_DYNCOL_FORMAT;
+
+ if (!((*nums)= my_malloc(sizeof(uint) * header.column_count, MYF(0))))
+ return ER_DYNCOL_RESOURCE;
+
+ for (i= 0, read= header.header;
+ i < header.column_count;
+ i++, read+= header.entry_size)
+ {
+ (*nums)[i]= uint2korr(read);
+ }
+ (*count)= header.column_count;
+ return ER_DYNCOL_OK;
+}
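+
+/*
+  Editor's usage sketch (not part of the original patch): the caller owns
+  the array allocated above and frees it with my_free().  Note that count
+  is left untouched for an empty record, hence the initialization.
+*/
+static inline ulonglong list_example(DYNAMIC_COLUMN *rec)
+{
+  uint count= 0, i, *nums= NULL;
+  ulonglong sum= 0;
+  if (mariadb_dyncol_list(rec, &count, &nums) == ER_DYNCOL_OK && nums)
+  {
+    for (i= 0; i < count; i++)
+      sum+= nums[i];
+    my_free(nums);
+  }
+  return sum;
+}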
+
+/**
+ List not-null columns in the packed string (any format)
+
+ @param str The packed string
+ @param count Number of names in the list
+  @param names           Where to put the names list (must be freed by the caller)
+
+ @return ER_DYNCOL_* return code
+*/
+
+enum enum_dyncol_func_result
+mariadb_dyncol_list_named(DYNAMIC_COLUMN *str, uint *count, LEX_STRING **names)
+{
+ DYN_HEADER header;
+ uchar *read;
+ char *pool;
+ struct st_service_funcs *fmt;
+ uint i;
+ enum enum_dyncol_func_result rc;
+
+ (*names)= 0; (*count)= 0;
+
+ if (str->length == 0)
+ return ER_DYNCOL_OK; /* no columns */
+
+ if ((rc= init_read_hdr(&header, str)) < 0)
+ return rc;
+
+ fmt= fmt_data + header.format;
+
+ if (header.entry_size * header.column_count + fmt->fixed_hdr >
+ str->length)
+ return ER_DYNCOL_FORMAT;
+
+ if (header.format == dyncol_fmt_num)
+ *names= my_malloc(sizeof(LEX_STRING) * header.column_count +
+ DYNCOL_NUM_CHAR * header.column_count, MYF(0));
+ else
+ *names= my_malloc(sizeof(LEX_STRING) * header.column_count +
+ header.nmpool_size + header.column_count, MYF(0));
+ if (!(*names))
+ return ER_DYNCOL_RESOURCE;
+ pool= ((char *)(*names)) + sizeof(LEX_STRING) * header.column_count;
+
+ for (i= 0, read= header.header;
+ i < header.column_count;
+ i++, read+= header.entry_size)
+ {
+ if (header.format == dyncol_fmt_num)
+ {
+ uint nm= uint2korr(read);
+ (*names)[i].str= pool;
+ pool+= DYNCOL_NUM_CHAR;
+ (*names)[i].length=
+ longlong2str(nm, (*names)[i].str, 10) - (*names)[i].str;
+ }
+ else
+ {
+ LEX_STRING tmp;
+ if (read_name(&header, read, &tmp))
+ return ER_DYNCOL_FORMAT;
+ (*names)[i].length= tmp.length;
+ (*names)[i].str= pool;
+ pool+= tmp.length + 1;
+ memcpy((*names)[i].str, (const void *)tmp.str, tmp.length);
+ (*names)[i].str[tmp.length]= '\0'; // just for safety
+ }
+ }
+ (*count)= header.column_count;
+ return ER_DYNCOL_OK;
+}
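
Note the allocation layout used by mariadb_dyncol_list_named() above: the LEX_STRING descriptors and the character pool behind them live in one malloc'ed block, so the caller releases everything with a single free. The same pattern in isolation (standalone sketch, plain libc, hypothetical make_names helper):

    #include <stdlib.h>

    typedef struct { char *str; size_t length; } lex_string;

    /* Allocate n descriptors plus pool_bytes of string storage in one block */
    static lex_string *make_names(size_t n, size_t pool_bytes, char **pool)
    {
      lex_string *arr= malloc(sizeof(lex_string) * n + pool_bytes);
      if (arr)
        *pool= (char *)arr + sizeof(lex_string) * n; /* pool follows array */
      return arr;                        /* free(arr) releases both parts */
    }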
/**
Find the place of the column in the header or place where it should be put
- @param num Number of the column
- @param header Pointer to the header
- @param entry_size Size of a header entry
- @param column_count Number of columns in the packed string
- @param entry Return pointer to the entry or next entry
+  @param hdr descriptor of the dynamic columns record
+  @param key Name or number of the column to find
+             (interpretation depends on string_keys)
+  @param string_keys TRUE if key points to a LEX_STRING (name),
+                     FALSE if it points to a uint (number)
@retval TRUE found
   @retval FALSE entry set to the place where the column should be inserted
*/
static my_bool
-find_place(uint num, uchar *header, size_t entry_size,
- uint column_count, uchar **entry)
+find_place(DYN_HEADER *hdr, void *key, my_bool string_keys)
{
uint mid, start, end, val;
- int flag;
- LINT_INIT(flag); /* 100 % safe */
+ int UNINIT_VAR(flag);
+ LEX_STRING str;
+ char buff[DYNCOL_NUM_CHAR];
+ my_bool need_conversion= ((string_keys ? dyncol_fmt_str : dyncol_fmt_num) !=
+ hdr->format);
+ /* new format can't be numeric if the old one is names */
+ DBUG_ASSERT(string_keys ||
+ hdr->format == dyncol_fmt_num);
start= 0;
- end= column_count -1;
+ end= hdr->column_count -1;
mid= 1;
while (start != end)
{
- uint val;
- mid= (start + end) / 2;
- val= uint2korr(header + mid * entry_size);
- if ((flag= CMP_NUM(num, val)) <= 0)
- end= mid;
- else
- start= mid + 1;
+ uint val;
+ mid= (start + end) / 2;
+ hdr->entry= hdr->header + mid * hdr->entry_size;
+ if (!string_keys)
+ {
+ val= uint2korr(hdr->entry);
+ flag= CMP_NUM(*((uint *)key), val);
+ }
+ else
+ {
+ if (need_conversion)
+ {
+ str.str= backwritenum(buff + sizeof(buff), uint2korr(hdr->entry));
+ str.length= (buff + sizeof(buff)) - str.str;
+ }
+ else
+ {
+ DBUG_ASSERT(hdr->format == dyncol_fmt_str);
+ if (read_name(hdr, hdr->entry, &str))
+ return 0;
+ }
+ flag= mariadb_dyncol_column_cmp_named((LEX_STRING *)key, &str);
+ }
+ if (flag <= 0)
+ end= mid;
+ else
+ start= mid + 1;
}
+ hdr->entry= hdr->header + start * hdr->entry_size;
if (start != mid)
{
- val= uint2korr(header + start * entry_size);
- flag= CMP_NUM(num, val);
+ if (!string_keys)
+ {
+ val= uint2korr(hdr->entry);
+ flag= CMP_NUM(*((uint *)key), val);
+ }
+ else
+ {
+ if (need_conversion)
+ {
+ str.str= backwritenum(buff + sizeof(buff), uint2korr(hdr->entry));
+ str.length= (buff + sizeof(buff)) - str.str;
+ }
+ else
+ {
+ DBUG_ASSERT(hdr->format == dyncol_fmt_str);
+ if (read_name(hdr, hdr->entry, &str))
+ return 0;
+ }
+ flag= mariadb_dyncol_column_cmp_named((LEX_STRING *)key, &str);
+ }
}
- *entry= header + start * entry_size;
if (flag > 0)
- *entry+= entry_size; /* Point at next bigger key */
+ hdr->entry+= hdr->entry_size; /* Point at next bigger key */
return flag == 0;
}
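
find_place() above is a lower-bound binary search: shrink [start, end] until one candidate is left, compare once more, and step past it when the key is bigger. The same control flow in isolation (standalone sketch over a sorted uint array; like the original it assumes n >= 1):

    /* Returns the index of key, or the index where it should be inserted. */
    static unsigned lower_bound(const unsigned *arr, unsigned n, unsigned key,
                                int *found)
    {
      unsigned start= 0, end= n - 1, mid= 1;
      int flag= -1;
      while (start != end)
      {
        mid= (start + end) / 2;
        if ((flag= (key > arr[mid]) - (key < arr[mid])) <= 0)
          end= mid;
        else
          start= mid + 1;
      }
      if (start != mid)
        flag= (key > arr[start]) - (key < arr[start]);
      *found= (flag == 0);
      return start + (flag > 0);   /* point at the next bigger key */
    }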
/*
- Description of plan of adding/removing/updating a packed string
+  Internal structure that describes one step of a plan for changing a
+  dynamic columns record
*/
typedef enum {PLAN_REPLACE, PLAN_ADD, PLAN_DELETE, PLAN_NOP} PLAN_ACT;
struct st_plan {
DYNAMIC_COLUMN_VALUE *val;
- uint *num;
+ void *key;
uchar *place;
size_t length;
- int hdelta, ddelta;
+ long long hdelta, ddelta, ndelta;
+ long long mv_offset, mv_length;
+ uint mv_end;
PLAN_ACT act;
};
typedef struct st_plan PLAN;
-static int plan_sort(const void *a, const void *b)
+/**
+ Sort function for plan by column number
+*/
+
+static int plan_sort_num(const void *a, const void *b)
{
- return ((PLAN *)a)->num[0] - ((PLAN *)b)->num[0];
+ return *((uint *)((PLAN *)a)->key) - *((uint *)((PLAN *)b)->key);
+}
+
+
+/**
+ Sort function for plan by column name
+*/
+
+static int plan_sort_named(const void *a, const void *b)
+{
+ return mariadb_dyncol_column_cmp_named((LEX_STRING *)((PLAN *)a)->key,
+ (LEX_STRING *)((PLAN *)b)->key);
}
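
Both comparators plug directly into qsort(); a standalone sketch of the numeric variant, with a hypothetical three-entry plan keyed through pointers the way st_plan stores them:

    #include <stdio.h>
    #include <stdlib.h>

    struct entry { unsigned *key; };

    static int cmp_num(const void *a, const void *b)
    {
      return (int)*((const struct entry *)a)->key -
             (int)*((const struct entry *)b)->key;
    }

    int main(void)
    {
      unsigned k[3]= { 7, 2, 5 };
      struct entry plan[3]= { { &k[0] }, { &k[1] }, { &k[2] } };
      qsort(plan, 3, sizeof(plan[0]), cmp_num);
      printf("%u %u %u\n", *plan[0].key, *plan[1].key, *plan[2].key); /* 2 5 7 */
      return 0;
    }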
#define DELTA_CHECK(S, D, C) \
@@ -1714,9 +2672,558 @@ static int plan_sort(const void *a, const void *b)
((S) < 0 && (D) > 0)) \
{ \
(C)= TRUE; \
- break; \
- } \
+ }
+/**
+ Update dynamic column by copying in a new record (string).
+
+ @param str Dynamic column record to change
+ @param plan Plan of changing the record
+  @param add_column_count Number of entries in the plan array
+  @param hdr descriptor of the old dynamic column record
+  @param new_hdr descriptor of the new dynamic column record
+  @param convert TRUE if conversion from numeric to named format is needed
+
+ @return ER_DYNCOL_* return code
+*/
+
+static enum enum_dyncol_func_result
+dynamic_column_update_copy(DYNAMIC_COLUMN *str, PLAN *plan,
+ uint add_column_count,
+ DYN_HEADER *hdr, DYN_HEADER *new_hdr,
+ my_bool convert)
+{
+ DYNAMIC_COLUMN tmp;
+ struct st_service_funcs *fmt= fmt_data + hdr->format,
+ *new_fmt= fmt_data + new_hdr->format;
+ uint i, j, k;
+ size_t all_headers_size;
+
+ if (dynamic_column_init_named(&tmp,
+ (new_fmt->fixed_hdr + new_hdr->header_size +
+ new_hdr->nmpool_size +
+ new_hdr->data_size + DYNCOL_SYZERESERVE)))
+ {
+ return ER_DYNCOL_RESOURCE;
+ }
+ bzero(tmp.str, new_fmt->fixed_hdr);
+ (*new_fmt->set_fixed_hdr)(&tmp, new_hdr);
+  /* Adjust tmp to contain the whole future header */
+ tmp.length= new_fmt->fixed_hdr + new_hdr->header_size + new_hdr->nmpool_size;
+
+
+ /*
+ Copy data to the new string
+ i= index in array of changes
+ j= index in packed string header index
+ */
+ new_hdr->entry= new_hdr->header;
+ new_hdr->name= new_hdr->nmpool;
+ all_headers_size= new_fmt->fixed_hdr +
+ new_hdr->header_size + new_hdr->nmpool_size;
+ for (i= 0, j= 0; i < add_column_count || j < hdr->column_count; i++)
+ {
+ size_t UNINIT_VAR(first_offset);
+ uint start= j, end;
+
+ /*
+      Advance i to the next change to apply and j to the position in the
+      old header where it belongs.
+ */
+
+ while (i < add_column_count && plan[i].act == PLAN_NOP)
+ i++; /* skip NOP */
+
+ if (i == add_column_count)
+ j= end= hdr->column_count;
+ else
+ {
+ /*
+        Old data portion. We don't need to check that j < column_count
+        as plan[i].place is guaranteed to point inside the header.
+ */
+ while (hdr->header + j * hdr->entry_size < plan[i].place)
+ j++;
+ end= j;
+ if ((plan[i].act == PLAN_REPLACE || plan[i].act == PLAN_DELETE))
+ j++; /* data at 'j' will be removed */
+ }
+
+ /*
+ Adjust all headers since last loop.
+ We have to do this as the offset for data has moved
+ */
+ for (k= start; k < end; k++)
+ {
+ uchar *read= hdr->header + k * hdr->entry_size;
+ void *key;
+ LEX_STRING name;
+ size_t offs;
+ uint nm;
+ DYNAMIC_COLUMN_TYPE tp;
+ char buff[DYNCOL_NUM_CHAR];
+
+ if (hdr->format == dyncol_fmt_num)
+ {
+ if (convert)
+ {
+ name.str= backwritenum(buff + sizeof(buff), uint2korr(read));
+ name.length= (buff + sizeof(buff)) - name.str;
+ key= &name;
+ }
+ else
+ {
+          nm= uint2korr(read); /* Column number */
+ key= &nm;
+ }
+ }
+ else
+ {
+ if (read_name(hdr, read, &name))
+ goto err;
+ key= &name;
+ }
+ if (fmt->type_and_offset_read(&tp, &offs,
+ read + fmt->fixed_hdr_entry,
+ hdr->offset_size))
+ goto err;
+ if (k == start)
+ first_offset= offs;
+ else if (offs < first_offset)
+ goto err;
+
+ offs+= plan[i].ddelta;
+ {
+ DYNAMIC_COLUMN_VALUE val;
+ val.type= tp; // only the type used in the header
+ if ((*new_fmt->put_header_entry)(new_hdr, key, &val, offs))
+ goto err;
+ }
+ }
+
+ /* copy first the data that was not replaced in original packed data */
+ if (start < end)
+ {
+ size_t data_size;
+ /* Add old data last in 'tmp' */
+ hdr->entry= hdr->header + start * hdr->entry_size;
+ data_size=
+ hdr_interval_length(hdr, hdr->header + end * hdr->entry_size);
+ if (data_size == DYNCOL_OFFSET_ERROR ||
+ (long) data_size < 0 ||
+ data_size > hdr->data_size - first_offset)
+ goto err;
+
+ memcpy(tmp.str + tmp.length, (char *)hdr->dtpool + first_offset,
+ data_size);
+ tmp.length+= data_size;
+ }
+
+ /* new data adding */
+ if (i < add_column_count)
+ {
+ if( plan[i].act == PLAN_ADD || plan[i].act == PLAN_REPLACE)
+ {
+ if ((*new_fmt->put_header_entry)(new_hdr, plan[i].key,
+ plan[i].val,
+ tmp.length - all_headers_size))
+ goto err;
+ data_store(&tmp, plan[i].val, new_hdr->format); /* Append new data */
+ }
+ }
+ }
+ dynamic_column_column_free(str);
+ *str= tmp;
+ return ER_DYNCOL_OK;
+err:
+ dynamic_column_column_free(&tmp);
+ return ER_DYNCOL_FORMAT;
+}
+
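+/**
+  Update a dynamic columns record in place, when the numeric format is kept
+  and the header and data parts only move towards the start of the string.
+
+  @return ER_DYNCOL_* return code
+*/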
+static enum enum_dyncol_func_result
+dynamic_column_update_move_left(DYNAMIC_COLUMN *str, PLAN *plan,
+ size_t offset_size,
+ size_t entry_size,
+ size_t header_size,
+ size_t new_offset_size,
+ size_t new_entry_size,
+ size_t new_header_size,
+ uint column_count,
+ uint new_column_count,
+ uint add_column_count,
+ uchar *header_end,
+ size_t max_offset)
+{
+ uchar *write;
+ uchar *header_base= (uchar *)str->str + FIXED_HEADER_SIZE;
+ uint i, j, k;
+ size_t curr_offset;
+
+ write= (uchar *)str->str + FIXED_HEADER_SIZE;
+ set_fixed_header(str, (uint)new_offset_size, new_column_count);
+
+ /*
+ Move headers first.
+ i= index in array of changes
+ j= index in packed string header index
+ */
+ for (curr_offset= 0, i= 0, j= 0;
+ i < add_column_count || j < column_count;
+ i++)
+ {
+ size_t UNINIT_VAR(first_offset);
+ uint start= j, end;
+
+ /*
+      Advance i to the next change to apply and j to the position in the
+      old header where it belongs.
+ */
+
+ while (i < add_column_count && plan[i].act == PLAN_NOP)
+ i++; /* skip NOP */
+
+ if (i == add_column_count)
+ j= end= column_count;
+ else
+ {
+ /*
+        Old data portion. We don't need to check that j < column_count
+        as plan[i].place is guaranteed to point inside the header.
+ */
+ while (header_base + j * entry_size < plan[i].place)
+ j++;
+ end= j;
+ if ((plan[i].act == PLAN_REPLACE || plan[i].act == PLAN_DELETE))
+ j++; /* data at 'j' will be removed */
+ }
+ plan[i].mv_end= end;
+
+ {
+ DYNAMIC_COLUMN_TYPE tp;
+ if (type_and_offset_read_num(&tp, &first_offset,
+ header_base + start * entry_size +
+ COLUMN_NUMBER_SIZE, offset_size))
+ return ER_DYNCOL_FORMAT;
+ }
+ /* find data to be moved */
+ if (start < end)
+ {
+ size_t data_size=
+ get_length_interval(header_base + start * entry_size,
+ header_base + end * entry_size,
+ header_end, offset_size, max_offset);
+ if (data_size == DYNCOL_OFFSET_ERROR ||
+ (long) data_size < 0 ||
+ data_size > max_offset - first_offset)
+ {
+ str->length= 0; // just something valid
+ return ER_DYNCOL_FORMAT;
+ }
+ DBUG_ASSERT(curr_offset == first_offset + plan[i].ddelta);
+ plan[i].mv_offset= first_offset;
+ plan[i].mv_length= data_size;
+ curr_offset+= data_size;
+ }
+ else
+ {
+ plan[i].mv_length= 0;
+ plan[i].mv_offset= curr_offset;
+ }
+
+ if (plan[i].ddelta == 0 && offset_size == new_offset_size &&
+ plan[i].act != PLAN_DELETE)
+ write+= entry_size * (end - start);
+ else
+ {
+ /*
+ Adjust all headers since last loop.
+ We have to do this as the offset for data has moved
+ */
+ for (k= start; k < end; k++)
+ {
+ uchar *read= header_base + k * entry_size;
+ size_t offs;
+ uint nm;
+ DYNAMIC_COLUMN_TYPE tp;
+
+        nm= uint2korr(read); /* Column number */
+ if (type_and_offset_read_num(&tp, &offs, read + COLUMN_NUMBER_SIZE,
+ offset_size))
+ return ER_DYNCOL_FORMAT;
+
+ if (k > start && offs < first_offset)
+ {
+ str->length= 0; // just something valid
+ return ER_DYNCOL_FORMAT;
+ }
+
+ offs+= plan[i].ddelta;
+ int2store(write, nm);
+ /* write rest of data at write + COLUMN_NUMBER_SIZE */
+ type_and_offset_store_num(write, new_offset_size, tp, offs);
+ write+= new_entry_size;
+ }
+ }
+
+ /* new data adding */
+ if (i < add_column_count)
+ {
+ if( plan[i].act == PLAN_ADD || plan[i].act == PLAN_REPLACE)
+ {
+ int2store(write, *((uint *)plan[i].key));
+ type_and_offset_store_num(write, new_offset_size,
+ plan[i].val[0].type,
+ curr_offset);
+ write+= new_entry_size;
+ curr_offset+= plan[i].length;
+ }
+ }
+ }
+
+ /*
+ Move data.
+ i= index in array of changes
+ j= index in packed string header index
+ */
+ str->length= (FIXED_HEADER_SIZE + new_header_size);
+ for (i= 0, j= 0;
+ i < add_column_count || j < column_count;
+ i++)
+ {
+ uint start= j, end;
+
+ /*
+      Advance i to the next change to apply and j to the position in the
+      old header where it belongs.
+ */
+
+ while (i < add_column_count && plan[i].act == PLAN_NOP)
+ i++; /* skip NOP */
+
+ j= end= plan[i].mv_end;
+ if (i != add_column_count &&
+ (plan[i].act == PLAN_REPLACE || plan[i].act == PLAN_DELETE))
+ j++;
+
+ /* copy first the data that was not replaced in original packed data */
+ if (start < end && plan[i].mv_length)
+ {
+ memmove((header_base + new_header_size +
+ plan[i].mv_offset + plan[i].ddelta),
+ header_base + header_size + plan[i].mv_offset,
+ plan[i].mv_length);
+ }
+ str->length+= plan[i].mv_length;
+
+ /* new data adding */
+ if (i < add_column_count)
+ {
+ if( plan[i].act == PLAN_ADD || plan[i].act == PLAN_REPLACE)
+ {
+ data_store(str, plan[i].val, dyncol_fmt_num);/* Append new data */
+ }
+ }
+ }
+ return ER_DYNCOL_OK;
+}
+
+#ifdef UNUSED
+static enum enum_dyncol_func_result
+dynamic_column_update_move_right(DYNAMIC_COLUMN *str, PLAN *plan,
+ size_t offset_size,
+ size_t entry_size,
+ size_t header_size,
+ size_t new_offset_size,
+ size_t new_entry_size,
+ size_t new_header_size,
+ uint column_count,
+ uint new_column_count,
+ uint add_column_count,
+ uchar *header_end,
+ size_t max_offset)
+{
+ uchar *write;
+ uchar *header_base= (uchar *)str->str + FIXED_HEADER_SIZE;
+ uint i, j, k;
+ size_t curr_offset;
+
+ write= (uchar *)str->str + FIXED_HEADER_SIZE;
+ set_fixed_header(str, new_offset_size, new_column_count);
+
+ /*
+ Move data first.
+ i= index in array of changes
+ j= index in packed string header index
+ */
+ for (curr_offset= 0, i= 0, j= 0;
+ i < add_column_count || j < column_count;
+ i++)
+ {
+ size_t UNINIT_VAR(first_offset);
+ uint start= j, end;
+
+ /*
+      Advance i to the next change to apply and j to the position in the
+      old header where it belongs.
+ */
+
+ while (i < add_column_count && plan[i].act == PLAN_NOP)
+ i++; /* skip NOP */
+
+ if (i == add_column_count)
+ j= end= column_count;
+ else
+ {
+ /*
+        Old data portion. We don't need to check that j < column_count
+        as plan[i].place is guaranteed to point inside the header.
+ */
+ while (header_base + j * entry_size < plan[i].place)
+ j++;
+ end= j;
+ if ((plan[i].act == PLAN_REPLACE || plan[i].act == PLAN_DELETE))
+ j++; /* data at 'j' will be removed */
+ }
+ plan[i].mv_end= end;
+
+ {
+ DYNAMIC_COLUMN_TYPE tp;
+ type_and_offset_read_num(&tp, &first_offset,
+ header_base +
+ start * entry_size + COLUMN_NUMBER_SIZE,
+ offset_size);
+ }
+ /* find data to be moved */
+ if (start < end)
+ {
+ size_t data_size=
+ get_length_interval(header_base + start * entry_size,
+ header_base + end * entry_size,
+ header_end, offset_size, max_offset);
+ if (data_size == DYNCOL_OFFSET_ERROR ||
+ (long) data_size < 0 ||
+ data_size > max_offset - first_offset)
+ {
+ str->length= 0; // just something valid
+ return ER_DYNCOL_FORMAT;
+ }
+ DBUG_ASSERT(curr_offset == first_offset + plan[i].ddelta);
+ plan[i].mv_offset= first_offset;
+ plan[i].mv_length= data_size;
+ curr_offset+= data_size;
+ }
+ else
+ {
+ plan[i].mv_length= 0;
+ plan[i].mv_offset= curr_offset;
+ }
+
+ if (plan[i].ddelta == 0 && offset_size == new_offset_size &&
+ plan[i].act != PLAN_DELETE)
+ write+= entry_size * (end - start);
+ else
+ {
+ /*
+ Adjust all headers since last loop.
+ We have to do this as the offset for data has moved
+ */
+ for (k= start; k < end; k++)
+ {
+ uchar *read= header_base + k * entry_size;
+ size_t offs;
+ uint nm;
+ DYNAMIC_COLUMN_TYPE tp;
+
+        nm= uint2korr(read); /* Column number */
+ type_and_offset_read_num(&tp, &offs, read + COLUMN_NUMBER_SIZE,
+ offset_size);
+ if (k > start && offs < first_offset)
+ {
+ str->length= 0; // just something valid
+ return ER_DYNCOL_FORMAT;
+ }
+
+ offs+= plan[i].ddelta;
+ int2store(write, nm);
+ /* write rest of data at write + COLUMN_NUMBER_SIZE */
+ if (type_and_offset_store_num(write, new_offset_size, tp, offs))
+ {
+ str->length= 0; // just something valid
+ return ER_DYNCOL_FORMAT;
+ }
+ write+= new_entry_size;
+ }
+ }
+
+ /* new data adding */
+ if (i < add_column_count)
+ {
+ if( plan[i].act == PLAN_ADD || plan[i].act == PLAN_REPLACE)
+ {
+ int2store(write, *((uint *)plan[i].key));
+ if (type_and_offset_store_num(write, new_offset_size,
+ plan[i].val[0].type,
+ curr_offset))
+ {
+ str->length= 0; // just something valid
+ return ER_DYNCOL_FORMAT;
+ }
+ write+= new_entry_size;
+ curr_offset+= plan[i].length;
+ }
+ }
+ }
+
+ /*
+ Move headers.
+ i= index in array of changes
+ j= index in packed string header index
+ */
+ str->length= (FIXED_HEADER_SIZE + new_header_size);
+ for (i= 0, j= 0;
+ i < add_column_count || j < column_count;
+ i++)
+ {
+ uint start= j, end;
+
+ /*
+      Advance i to the next change to apply and j to the position in the
+      old header where it belongs.
+ */
+
+ while (i < add_column_count && plan[i].act == PLAN_NOP)
+ i++; /* skip NOP */
+
+ j= end= plan[i].mv_end;
+ if (i != add_column_count &&
+ (plan[i].act == PLAN_REPLACE || plan[i].act == PLAN_DELETE))
+ j++;
+
+ /* copy first the data that was not replaced in original packed data */
+ if (start < end && plan[i].mv_length)
+ {
+ memmove((header_base + new_header_size +
+ plan[i].mv_offset + plan[i].ddelta),
+ header_base + header_size + plan[i].mv_offset,
+ plan[i].mv_length);
+ }
+ str->length+= plan[i].mv_length;
+
+ /* new data adding */
+ if (i < add_column_count)
+ {
+ if( plan[i].act == PLAN_ADD || plan[i].act == PLAN_REPLACE)
+ {
+ data_store(str, plan[i].val, dyncol_fmt_num); /* Append new data */
+ }
+ }
+ }
+ return ER_DYNCOL_OK;
+}
+#endif
/**
Update the packed string with the given columns
@@ -1728,6 +3235,8 @@ static int plan_sort(const void *a, const void *b)
@return ER_DYNCOL_* return code
*/
+/* Plans of up to this many entries are allocated on the stack */
+#define IN_PLACE_PLAN 4
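
The small-plan path below avoids a heap allocation for the common case of a few changed columns: plans of up to IN_PLACE_PLAN entries live on the stack, and only alloc_plan (NULL in the stack case) is ever freed. The pattern in isolation (standalone sketch):

    #include <stdlib.h>

    #define SMALL 4

    static void work(size_t n)
    {
      int in_place[SMALL];
      int *buf, *heap= NULL;
      if (n <= SMALL)
        buf= in_place;                     /* no allocation for small inputs */
      else if (!(heap= buf= malloc(n * sizeof(int))))
        return;                            /* allocation failed */
      /* ... use buf[0..n-1] ... */
      free(heap);                          /* free(NULL) is a no-op */
    }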
enum enum_dyncol_func_result
dynamic_column_update_many(DYNAMIC_COLUMN *str,
@@ -1735,39 +3244,95 @@ dynamic_column_update_many(DYNAMIC_COLUMN *str,
uint *column_numbers,
DYNAMIC_COLUMN_VALUE *values)
{
- PLAN *plan;
- uchar *header_end;
- long data_delta= 0;
- uint i, j, k;
- uint new_column_count, column_count, not_null;
+ return dynamic_column_update_many_fmt(str, add_column_count, column_numbers,
+ values, FALSE);
+}
+
+enum enum_dyncol_func_result
+mariadb_dyncol_update_many(DYNAMIC_COLUMN *str,
+ uint add_column_count,
+ uint *column_numbers,
+ DYNAMIC_COLUMN_VALUE *values)
+{
+ return dynamic_column_update_many_fmt(str, add_column_count, column_numbers,
+ values, FALSE);
+}
+
+enum enum_dyncol_func_result
+mariadb_dyncol_update_many_named(DYNAMIC_COLUMN *str,
+ uint add_column_count,
+ LEX_STRING *column_names,
+ DYNAMIC_COLUMN_VALUE *values)
+{
+ return dynamic_column_update_many_fmt(str, add_column_count, column_names,
+ values, TRUE);
+}
+
+static uint numlen(uint val)
+{
+ uint res;
+ if (val == 0)
+ return 1;
+ res= 0;
+ while(val)
+ {
+ res++;
+ val/=10;
+ }
+ return res;
+}
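
numlen() sizes the name pool when numeric column IDs are converted to their decimal string form: numlen(0) == 1, numlen(9) == 1, numlen(10) == 2, and numlen(65535) == 5 (the widest value uint2korr can yield). A standalone check of the same digit count:

    #include <assert.h>

    static unsigned digits(unsigned val)
    {
      unsigned res= 1;
      while (val >= 10)
      {
        res++;
        val/= 10;
      }
      return res;
    }

    int main(void)
    {
      assert(digits(0) == 1 && digits(9) == 1);
      assert(digits(10) == 2 && digits(65535) == 5);
      return 0;
    }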
+
+static enum enum_dyncol_func_result
+dynamic_column_update_many_fmt(DYNAMIC_COLUMN *str,
+ uint add_column_count,
+ void *column_keys,
+ DYNAMIC_COLUMN_VALUE *values,
+ my_bool string_keys)
+{
+ PLAN *plan, *alloc_plan= NULL, in_place_plan[IN_PLACE_PLAN];
+ uchar *element;
+ DYN_HEADER header, new_header;
+ struct st_service_funcs *fmt, *new_fmt;
+ long long data_delta= 0, name_delta= 0;
+ uint i;
+ uint not_null;
+ long long header_delta= 0;
+ long long header_delta_sign, data_delta_sign;
+ int copy= FALSE;
enum enum_dyncol_func_result rc;
- int header_delta;
- size_t offset_size, entry_size, header_size, data_size;
- size_t new_offset_size, new_entry_size, new_header_size, new_data_size;
- size_t max_offset;
+ my_bool convert;
if (add_column_count == 0)
return ER_DYNCOL_OK;
+ bzero(&header, sizeof(header));
+ bzero(&new_header, sizeof(new_header));
+ new_header.format= (string_keys ? dyncol_fmt_str : dyncol_fmt_num);
+ new_fmt= fmt_data + new_header.format;
+
/*
Get columns in column order. As the data in 'str' is already
     in column order, this allows us to replace all columns in one loop.
*/
-
- if (!(plan= my_malloc(sizeof(PLAN) * (add_column_count + 1), MYF(0))))
+ if (IN_PLACE_PLAN > add_column_count)
+ plan= in_place_plan;
+ else if (!(alloc_plan= plan=
+ my_malloc(sizeof(PLAN) * (add_column_count + 1), MYF(0))))
return ER_DYNCOL_RESOURCE;
not_null= add_column_count;
- for (i= 0; i < add_column_count; i++)
+ for (i= 0, element= (uchar *) column_keys;
+ i < add_column_count;
+ i++, element+= new_fmt->key_size_in_array)
{
- if (column_numbers[i] > UINT_MAX16)
+ if ((*new_fmt->check_limit)(&element))
{
rc= ER_DYNCOL_DATA;
goto end;
}
plan[i].val= values + i;
- plan[i].num= column_numbers + i;
+ plan[i].key= element;
if (values[i].type == DYN_COL_NULL)
not_null--;
@@ -1783,22 +3348,32 @@ dynamic_column_update_many(DYNAMIC_COLUMN *str,
}
/* Check that header is ok */
- if (read_fixed_header(str, &offset_size, &column_count))
- {
- rc= ER_DYNCOL_FORMAT;
+ if ((rc= init_read_hdr(&header, str)) < 0)
goto end;
- }
- if (column_count == 0)
+ fmt= fmt_data + header.format;
+ /* new format can't be numeric if the old one is names */
+ DBUG_ASSERT(new_header.format == dyncol_fmt_str ||
+ header.format == dyncol_fmt_num);
+ if (header.column_count == 0)
goto create_new_string;
- qsort(plan, (size_t)add_column_count, sizeof(PLAN), &plan_sort);
+ qsort(plan, (size_t)add_column_count, sizeof(PLAN), new_fmt->plan_sort);
- new_column_count= column_count;
- calc_param(&entry_size, &header_size, offset_size, column_count);
- max_offset= str->length - (FIXED_HEADER_SIZE + header_size);
- header_end= (uchar*) str->str + FIXED_HEADER_SIZE + header_size;
+ new_header.column_count= header.column_count;
+ new_header.nmpool_size= header.nmpool_size;
+ if ((convert= (new_header.format == dyncol_fmt_str &&
+ header.format == dyncol_fmt_num)))
+ {
+ DBUG_ASSERT(new_header.nmpool_size == 0);
+ for(i= 0, header.entry= header.header;
+ i < header.column_count;
+ i++, header.entry+= header.entry_size)
+ {
+ new_header.nmpool_size+= numlen(uint2korr(header.entry));
+ }
+ }
- if (header_size + FIXED_HEADER_SIZE > str->length)
+ if (fmt->fixed_hdr + header.header_size + header.nmpool_size > str->length)
{
rc= ER_DYNCOL_FORMAT;
goto end;
@@ -1808,17 +3383,15 @@ dynamic_column_update_many(DYNAMIC_COLUMN *str,
Calculate how many columns and data is added/deleted and make a 'plan'
for each of them.
*/
- header_delta= 0;
for (i= 0; i < add_column_count; i++)
{
- uchar *entry;
-
/*
For now we don't allow creating two columns with the same number
      at the time of create. This could later be relaxed to just use the
      last value, found by comparing the pointers.
*/
- if (i < add_column_count - 1 && plan[i].num[0] == plan[i + 1].num[0])
+ if (i < add_column_count - 1 &&
+ new_fmt->column_sort(&plan[i].key, &plan[i + 1].key) == 0)
{
rc= ER_DYNCOL_DATA;
goto end;
@@ -1826,26 +3399,42 @@ dynamic_column_update_many(DYNAMIC_COLUMN *str,
/* Set common variables for all plans */
plan[i].ddelta= data_delta;
+ plan[i].ndelta= name_delta;
/* get header delta in entries */
plan[i].hdelta= header_delta;
plan[i].length= 0; /* Length if NULL */
- if (find_place(plan[i].num[0],
- (uchar *)str->str + FIXED_HEADER_SIZE,
- entry_size, column_count, &entry))
+ if (find_place(&header, plan[i].key, string_keys))
{
- size_t entry_data_size;
+ size_t entry_data_size, entry_name_size= 0;
/* Data existed; We have to replace or delete it */
- entry_data_size= get_length(entry, header_end,
- offset_size, max_offset);
- if ((long) entry_data_size < 0)
+ entry_data_size= hdr_interval_length(&header, header.entry +
+ header.entry_size);
+ if (entry_data_size == DYNCOL_OFFSET_ERROR ||
+ (long) entry_data_size < 0)
{
rc= ER_DYNCOL_FORMAT;
goto end;
}
+ if (new_header.format == dyncol_fmt_str)
+ {
+ if (header.format == dyncol_fmt_str)
+ {
+ LEX_STRING name;
+ if (read_name(&header, header.entry, &name))
+ {
+ rc= ER_DYNCOL_FORMAT;
+ goto end;
+ }
+ entry_name_size= name.length;
+ }
+ else
+ entry_name_size= numlen(uint2korr(header.entry));
+ }
+
if (plan[i].val->type == DYN_COL_NULL)
{
/* Inserting a NULL means delete the old data */
@@ -1853,6 +3442,7 @@ dynamic_column_update_many(DYNAMIC_COLUMN *str,
plan[i].act= PLAN_DELETE; /* Remove old value */
header_delta--; /* One row less in header */
data_delta-= entry_data_size; /* Less data to store */
+ name_delta-= entry_name_size;
}
else
{
@@ -1860,13 +3450,18 @@ dynamic_column_update_many(DYNAMIC_COLUMN *str,
plan[i].act= PLAN_REPLACE;
/* get data delta in bytes */
- if ((plan[i].length= dynamic_column_value_len(plan[i].val)) ==
+ if ((plan[i].length= dynamic_column_value_len(plan[i].val,
+ new_header.format)) ==
(size_t) ~0)
{
rc= ER_DYNCOL_DATA;
goto end;
}
data_delta+= plan[i].length - entry_data_size;
+ if (new_header.format == dyncol_fmt_str)
+ {
+ name_delta+= ((LEX_STRING *)(plan[i].key))->length - entry_name_size;
+ }
}
}
else
@@ -1884,226 +3479,849 @@ dynamic_column_update_many(DYNAMIC_COLUMN *str,
plan[i].act= PLAN_ADD;
header_delta++; /* One more row in header */
/* get data delta in bytes */
- if ((plan[i].length= dynamic_column_value_len(plan[i].val)) ==
+ if ((plan[i].length= dynamic_column_value_len(plan[i].val,
+ new_header.format)) ==
(size_t) ~0)
{
rc= ER_DYNCOL_DATA;
goto end;
}
data_delta+= plan[i].length;
+ if (new_header.format == dyncol_fmt_str)
+ name_delta+= ((LEX_STRING *)plan[i].key)->length;
}
}
- plan[i].place= entry;
+ plan[i].place= header.entry;
}
plan[add_column_count].hdelta= header_delta;
plan[add_column_count].ddelta= data_delta;
- new_column_count= column_count + header_delta;
+ plan[add_column_count].act= PLAN_NOP;
+ plan[add_column_count].place= header.dtpool;
+
+ new_header.column_count= (uint)(header.column_count + header_delta);
/*
Check if it is only "increasing" or only "decreasing" plan for (header
and data separately).
*/
- data_size= str->length - header_size - FIXED_HEADER_SIZE;
- new_data_size= data_size + data_delta;
- if ((new_offset_size= dynamic_column_offset_bytes(new_data_size)) >=
- MAX_OFFSET_LENGTH)
+ new_header.data_size= header.data_size + data_delta;
+ new_header.nmpool_size= new_header.nmpool_size + name_delta;
+ DBUG_ASSERT(new_header.format != dyncol_fmt_num ||
+ new_header.nmpool_size == 0);
+ if ((new_header.offset_size=
+ new_fmt->dynamic_column_offset_bytes(new_header.data_size)) >=
+ new_fmt->max_offset_size)
{
rc= ER_DYNCOL_LIMIT;
goto end;
}
-#ifdef NOT_IMPLEMENTED
- /* if (new_offset_size != offset_size) then we have to rewrite header */
- header_delta_sign= new_offset_size - offset_size;
+ copy= ((header.format != new_header.format) ||
+ (new_header.format == dyncol_fmt_str));
+  /* if (new_header.offset_size != header.offset_size) we have to rewrite the header */
+ header_delta_sign=
+ ((int)new_header.offset_size + new_fmt->fixed_hdr_entry) -
+ ((int)header.offset_size + fmt->fixed_hdr_entry);
data_delta_sign= 0;
- for (i= 0; i < add_column_count; i++)
+ // plan[add_column_count] contains last deltas.
+ for (i= 0; i <= add_column_count && !copy; i++)
{
/* This is the check for increasing/decreasing */
DELTA_CHECK(header_delta_sign, plan[i].hdelta, copy);
DELTA_CHECK(data_delta_sign, plan[i].ddelta, copy);
}
-#endif
- calc_param(&new_entry_size, &new_header_size,
- new_offset_size, new_column_count);
+ calc_param(&new_header.entry_size, &new_header.header_size,
+ new_fmt->fixed_hdr_entry,
+ new_header.offset_size, new_header.column_count);
/*
- The following code always make a copy. In future we can do a more
- optimized version when data is only increasing / decreasing.
+ Need copy because:
+    1. The copy flag is already set (format change or non-monotonic moves).
+    2. There is not enough allocated space in the string.
+    3. Header and data move in different directions.
*/
+ if (copy || /*1.*/
+ str->max_length < str->length + header_delta + data_delta || /*2.*/
+ ((header_delta_sign < 0 && data_delta_sign > 0) ||
+ (header_delta_sign > 0 && data_delta_sign < 0))) /*3.*/
+ rc= dynamic_column_update_copy(str, plan, add_column_count,
+ &header, &new_header,
+ convert);
+ else
+ if (header_delta_sign < 0)
+ rc= dynamic_column_update_move_left(str, plan, header.offset_size,
+ header.entry_size,
+ header.header_size,
+ new_header.offset_size,
+ new_header.entry_size,
+ new_header.header_size,
+ header.column_count,
+ new_header.column_count,
+ add_column_count, header.dtpool,
+ header.data_size);
+ else
+ /*
+ rc= dynamic_column_update_move_right(str, plan, offset_size,
+ entry_size, header_size,
+ new_header.offset_size,
+ new_header.entry_size,
+                                         new_header.header_size, column_count,
+ new_header.column_count,
+ add_column_count, header_end,
+ header.data_size);
+ */
+ rc= dynamic_column_update_copy(str, plan, add_column_count,
+ &header, &new_header,
+ convert);
+end:
+ my_free(alloc_plan);
+ return rc;
+
+create_new_string:
+  /* There are no columns from before, so let's just add the new ones */
+ rc= ER_DYNCOL_OK;
+ my_free(alloc_plan);
+ if (not_null != 0)
+ rc= dynamic_column_create_many_internal_fmt(str, add_column_count,
+ (uint*)column_keys, values,
+ str->str == NULL,
+ string_keys);
+ goto end;
+}
+
+
+/**
+ Update the packed string with the given column
+
+ @param str String where to write the data
+  @param column_nr Number of the column to update
+  @param value Value to set (DYN_COL_NULL deletes the column)
+
+ @return ER_DYNCOL_* return code
+*/
+
+
+int dynamic_column_update(DYNAMIC_COLUMN *str, uint column_nr,
+ DYNAMIC_COLUMN_VALUE *value)
+{
+ return dynamic_column_update_many(str, 1, &column_nr, value);
+}
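
A usage sketch for the single-column wrapper above (an illustration, using the value layout visible elsewhere in this file; a DYN_COL_NULL value would delete the column instead, as noted in the planning code):

    /* Set column 3 of an existing packed record to the unsigned value 42. */
    static int set_column_3(DYNAMIC_COLUMN *dyncol)
    {
      DYNAMIC_COLUMN_VALUE val;
      val.type= DYN_COL_UINT;
      val.x.ulong_value= 42;
      return dynamic_column_update(dyncol, 3, &val);
    }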
+
- /*if (copy) */
+enum enum_dyncol_func_result
+mariadb_dyncol_check(DYNAMIC_COLUMN *str)
+{
+ struct st_service_funcs *fmt;
+ enum enum_dyncol_func_result rc= ER_DYNCOL_FORMAT;
+ DYN_HEADER header;
+ uint i;
+ size_t data_offset= 0, name_offset= 0;
+ size_t prev_data_offset= 0, prev_name_offset= 0;
+ LEX_STRING name= {0,0}, prev_name= {0,0};
+ uint num= 0, prev_num= 0;
+ void *key, *prev_key;
+ enum enum_dynamic_column_type type= DYN_COL_NULL, prev_type= DYN_COL_NULL;
+
+ DBUG_ENTER("dynamic_column_check");
+
+ if (str->length == 0)
+ {
+ DBUG_PRINT("info", ("empty string is OK"));
+ DBUG_RETURN(ER_DYNCOL_OK);
+ }
+
+ bzero(&header, sizeof(header));
+
+ /* Check that header is OK */
+ if (read_fixed_header(&header, str))
+ {
+ DBUG_PRINT("info", ("Reading fixed string header failed"));
+ goto end;
+ }
+ fmt= fmt_data + header.format;
+ calc_param(&header.entry_size, &header.header_size,
+ fmt->fixed_hdr_entry, header.offset_size,
+ header.column_count);
+  /* header extends beyond the string length (no room for name pool and data) */
+ if (fmt->fixed_hdr + header.header_size + header.nmpool_size > str->length)
+ {
+ DBUG_PRINT("info", ("Fixed header: %u Header size: %u "
+                        "Name pool size: %u but String length: %u",
+ (uint)fmt->fixed_hdr,
+ (uint)header.header_size,
+ (uint)header.nmpool_size,
+ (uint)str->length));
+ goto end;
+ }
+ header.header= (uchar*)str->str + fmt->fixed_hdr;
+ header.nmpool= header.header + header.header_size;
+ header.dtpool= header.nmpool + header.nmpool_size;
+ header.data_size= str->length - fmt->fixed_hdr -
+ header.header_size - header.nmpool_size;
+
+ /* read and check headers */
+ if (header.format == dyncol_fmt_num)
+ {
+ key= &num;
+ prev_key= &prev_num;
+ }
+ else
+ {
+ key= &name;
+ prev_key= &prev_name;
+ }
+ for (i= 0, header.entry= header.header;
+ i < header.column_count;
+ i++, header.entry+= header.entry_size)
{
- DYNAMIC_COLUMN tmp;
- uchar *header_base= (uchar *)str->str + FIXED_HEADER_SIZE,
- *write;
- if (dynamic_column_init_str(&tmp,
- (FIXED_HEADER_SIZE + new_header_size +
- new_data_size + DYNCOL_SYZERESERVE)))
+
+ if (header.format == dyncol_fmt_num)
{
- rc= ER_DYNCOL_RESOURCE;
- goto end;
+ num= uint2korr(header.entry);
+ }
+ else
+ {
+ DBUG_ASSERT(header.format == dyncol_fmt_str);
+ if (read_name(&header, header.entry, &name))
+ {
+ DBUG_PRINT("info", ("Reading name failed: Field order: %u"
+ " Name offset: %u"
+ " Name pool size: %u",
+ (uint) i,
+ uint2korr(header.entry),
+ (uint)header.nmpool_size));
+ goto end;
+ }
+ name_offset= name.str - (char *)header.nmpool;
}
- write= (uchar *)tmp.str + FIXED_HEADER_SIZE;
- /* Adjust tmp to contain whole the future header */
- tmp.length= FIXED_HEADER_SIZE + new_header_size;
- set_fixed_header(&tmp, new_offset_size, new_column_count);
- data_delta= 0;
+ if ((*fmt->type_and_offset_read)(&type, &data_offset,
+ header.entry + fmt->fixed_hdr_entry,
+ header.offset_size))
+ goto end;
- /*
- Copy data to the new string
- i= index in array of changes
- j= index in packed string header index
- */
+ DBUG_ASSERT(type != DYN_COL_NULL);
+ if (data_offset > header.data_size)
+ {
+ DBUG_PRINT("info", ("Field order: %u Data offset: %u"
+ " > Data pool size: %u",
+ (uint)i,
+ (uint)data_offset,
+ (uint)header.data_size));
+ goto end;
+ }
+ if (prev_type != DYN_COL_NULL)
+ {
+ /* It is not first entry */
+ if (prev_data_offset >= data_offset)
+ {
+ DBUG_PRINT("info", ("Field order: %u Previous data offset: %u"
+ " >= Current data offset: %u",
+ (uint)i,
+ (uint)prev_data_offset,
+ (uint)data_offset));
+ goto end;
+ }
+ if (prev_name_offset > name_offset)
+ {
+ DBUG_PRINT("info", ("Field order: %u Previous name offset: %u"
+ " > Current name offset: %u",
+ (uint)i,
+                          (uint)prev_name_offset,
+                          (uint)name_offset));
+ goto end;
+ }
+ if ((*fmt->column_sort)(&prev_key, &key) >= 0)
+ {
+ DBUG_PRINT("info", ("Field order: %u Previous key >= Current key",
+ (uint)i));
+ goto end;
+ }
+ }
+ prev_num= num;
+ prev_name= name;
+ prev_data_offset= data_offset;
+ prev_name_offset= name_offset;
+ prev_type= type;
+ }
- for (i= 0, j= 0; i < add_column_count || j < column_count; i++)
+ /* check data, which we can */
+ for (i= 0, header.entry= header.header;
+ i < header.column_count;
+ i++, header.entry+= header.entry_size)
+ {
+ DYNAMIC_COLUMN_VALUE store;
+    // already checked by the previous pass
+ (*fmt->type_and_offset_read)(&header.type, &header.offset,
+ header.entry + fmt->fixed_hdr_entry,
+ header.offset_size);
+ header.length=
+ hdr_interval_length(&header, header.entry + header.entry_size);
+ header.data= header.dtpool + header.offset;
+ switch ((header.type)) {
+ case DYN_COL_INT:
+ rc= dynamic_column_sint_read(&store, header.data, header.length);
+ break;
+ case DYN_COL_UINT:
+ rc= dynamic_column_uint_read(&store, header.data, header.length);
+ break;
+ case DYN_COL_DOUBLE:
+ rc= dynamic_column_double_read(&store, header.data, header.length);
+ break;
+ case DYN_COL_STRING:
+ rc= dynamic_column_string_read(&store, header.data, header.length);
+ break;
+ case DYN_COL_DECIMAL:
+ rc= dynamic_column_decimal_read(&store, header.data, header.length);
+ break;
+ case DYN_COL_DATETIME:
+ rc= dynamic_column_date_time_read(&store, header.data,
+ header.length);
+ break;
+ case DYN_COL_DATE:
+ rc= dynamic_column_date_read(&store, header.data, header.length);
+ break;
+ case DYN_COL_TIME:
+ rc= dynamic_column_time_read(&store, header.data, header.length);
+ break;
+ case DYN_COL_DYNCOL:
+ rc= dynamic_column_dyncol_read(&store, header.data, header.length);
+ break;
+ case DYN_COL_NULL:
+ default:
+ rc= ER_DYNCOL_FORMAT;
+ goto end;
+ }
+ if (rc != ER_DYNCOL_OK)
{
- size_t first_offset;
- uint start= j, end;
- LINT_INIT(first_offset);
+ DBUG_ASSERT(rc < 0);
+ DBUG_PRINT("info", ("Field order: %u Can't read data: %i",
+ (uint)i, (int) rc));
+ goto end;
+ }
+ }
- /*
- Search in i and j for the next column to add from i and where to
- add.
- */
+ rc= ER_DYNCOL_OK;
+end:
+ DBUG_RETURN(rc);
+}
- while (i < add_column_count && plan[i].act == PLAN_NOP)
- i++; /* skip NOP */
- if (i == add_column_count)
- j= end= column_count;
- else
- {
- /*
- old data portion. We don't need to check that j < column_count
- as plan[i].place is guaranteed to have a pointer inside the
- data.
- */
- while (header_base + j * entry_size < plan[i].place)
- j++;
- end= j;
- if ((plan[i].act == PLAN_REPLACE || plan[i].act == PLAN_DELETE))
- j++; /* data at 'j' will be removed */
- }
- if (plan[i].ddelta == 0 && offset_size == new_offset_size)
+enum enum_dyncol_func_result
+mariadb_dyncol_val_str(DYNAMIC_STRING *str, DYNAMIC_COLUMN_VALUE *val,
+ CHARSET_INFO *cs, char quote)
+{
+ char buff[40];
+ size_t len;
+ switch (val->type) {
+ case DYN_COL_INT:
+ len= snprintf(buff, sizeof(buff), "%lld", val->x.long_value);
+ if (dynstr_append_mem(str, buff, len))
+ return ER_DYNCOL_RESOURCE;
+ break;
+ case DYN_COL_UINT:
+ len= snprintf(buff, sizeof(buff), "%llu", val->x.ulong_value);
+ if (dynstr_append_mem(str, buff, len))
+ return ER_DYNCOL_RESOURCE;
+ break;
+ case DYN_COL_DOUBLE:
+ len= my_snprintf(buff, sizeof(buff), "%g", val->x.double_value);
+ if (dynstr_realloc(str, len + (quote ? 2 : 0)))
+ return ER_DYNCOL_RESOURCE;
+ if (quote)
+ str->str[str->length++]= quote;
+ dynstr_append_mem(str, buff, len);
+ if (quote)
+ str->str[str->length++]= quote;
+ break;
+ case DYN_COL_DYNCOL:
+ case DYN_COL_STRING:
{
- uchar *read= header_base + start * entry_size;
- DYNAMIC_COLUMN_TYPE tp;
- /*
- It's safe to copy the header unchanged. This is usually the
- case for the first header block before any changed data.
- */
- if (start < end) /* Avoid memcpy with 0 */
+ char *alloc= NULL;
+ char *from= val->x.string.value.str;
+ ulong bufflen;
+ my_bool conv= !my_charset_same(val->x.string.charset, cs);
+ my_bool rc;
+ len= val->x.string.value.length;
+ bufflen= (ulong)(len * (conv ? cs->mbmaxlen : 1));
+ if (dynstr_realloc(str, bufflen))
+ return ER_DYNCOL_RESOURCE;
+
+      // ensure the value is converted to the destination charset
+ if (!my_charset_same(val->x.string.charset, cs))
{
- size_t length= entry_size * (end - start);
- memcpy(write, read, length);
- write+= length;
+ uint dummy_errors;
+ if (!quote)
+ {
+ /* convert to the destination */
+ str->length+= copy_and_convert_extended(str->str, bufflen,
+ cs,
+ from, (uint32)len,
+ val->x.string.charset,
+ &dummy_errors);
+ return ER_DYNCOL_OK;
+ }
+ if ((alloc= (char *)my_malloc(bufflen, MYF(0))))
+ {
+ len=
+ copy_and_convert_extended(alloc, bufflen, cs,
+ from, (uint32)len,
+ val->x.string.charset,
+ &dummy_errors);
+ from= alloc;
+ }
+ else
+ return ER_DYNCOL_RESOURCE;
}
- /* Read first_offset */
- type_and_offset_read(&tp, &first_offset, read, offset_size);
+ if (quote)
+ rc= dynstr_append_quoted(str, from, len, quote);
+ else
+ rc= dynstr_append_mem(str, from, len);
+ if (alloc)
+ my_free(alloc);
+ if (rc)
+ return ER_DYNCOL_RESOURCE;
+ break;
}
- else
+ case DYN_COL_DECIMAL:
{
- /*
- Adjust all headers since last loop.
- We have to do this as the offset for data has moved
- */
- for (k= start; k < end; k++)
- {
- uchar *read= header_base + k * entry_size;
- size_t offs;
- uint nm;
- DYNAMIC_COLUMN_TYPE tp;
+ int len= sizeof(buff);
+ decimal2string(&val->x.decimal.value, buff, &len,
+ 0, val->x.decimal.value.frac,
+ '0');
+ if (dynstr_append_mem(str, buff, len))
+ return ER_DYNCOL_RESOURCE;
+ break;
+ }
+ case DYN_COL_DATETIME:
+ case DYN_COL_DATE:
+ case DYN_COL_TIME:
+ len= my_TIME_to_str(&val->x.time_value, buff, AUTO_SEC_PART_DIGITS);
+ if (dynstr_realloc(str, len + (quote ? 2 : 0)))
+ return ER_DYNCOL_RESOURCE;
+ if (quote)
+ str->str[str->length++]= '"';
+ dynstr_append_mem(str, buff, len);
+ if (quote)
+ str->str[str->length++]= '"';
+ break;
+ case DYN_COL_NULL:
+ if (dynstr_append_mem(str, "null", 4))
+ return ER_DYNCOL_RESOURCE;
+ break;
+ default:
+ return(ER_DYNCOL_FORMAT);
+ }
+ return(ER_DYNCOL_OK);
+}
- nm= uint2korr(read); /* Column nummber */
- type_and_offset_read(&tp, &offs, read, offset_size);
- if (k == start)
- first_offset= offs;
- else if (offs < first_offset)
+
+enum enum_dyncol_func_result
+mariadb_dyncol_val_long(longlong *ll, DYNAMIC_COLUMN_VALUE *val)
+{
+ enum enum_dyncol_func_result rc= ER_DYNCOL_OK;
+ *ll= 0;
+ switch (val->type) {
+ case DYN_COL_INT:
+ *ll= val->x.long_value;
+ break;
+ case DYN_COL_UINT:
+ *ll= (longlong)val->x.ulong_value;
+    if (val->x.ulong_value > (ulonglong) LONGLONG_MAX)
+ rc= ER_DYNCOL_TRUNCATED;
+ break;
+ case DYN_COL_DOUBLE:
+ *ll= (longlong)val->x.double_value;
+ if (((double) *ll) != val->x.double_value)
+ rc= ER_DYNCOL_TRUNCATED;
+ break;
+ case DYN_COL_STRING:
+ {
+ char *src= val->x.string.value.str;
+ size_t len= val->x.string.value.length;
+ longlong i= 0, sign= 1;
+
+ while (len && my_isspace(&my_charset_latin1, *src)) src++,len--;
+
+ if (len)
+ {
+ if (*src == '-')
{
- dynamic_column_column_free(&tmp);
- rc= ER_DYNCOL_FORMAT;
- goto end;
+        sign= -1;
+        src++; len--;
+      } else if (*src == '+')
+        src++, len--;
+ while(len && my_isdigit(&my_charset_latin1, *src))
+ {
+        i= i * 10 + (*src - '0');
+        src++; len--;
}
-
- offs+= plan[i].ddelta;
- int2store(write, nm);
- /* write rest of data at write + COLUMN_NUMBER_SIZE */
- type_and_offset_store(write, new_offset_size, tp, offs);
- write+= new_entry_size;
}
+ else
+ rc= ER_DYNCOL_TRUNCATED;
+ if (len)
+ rc= ER_DYNCOL_TRUNCATED;
+ *ll= i * sign;
+ break;
}
+ case DYN_COL_DECIMAL:
+ if (decimal2longlong(&val->x.decimal.value, ll) != E_DEC_OK)
+ rc= ER_DYNCOL_TRUNCATED;
+ break;
+ case DYN_COL_DATETIME:
+ *ll= (val->x.time_value.year * 10000000000ull +
+ val->x.time_value.month * 100000000L +
+ val->x.time_value.day * 1000000 +
+ val->x.time_value.hour * 10000 +
+ val->x.time_value.minute * 100 +
+ val->x.time_value.second) *
+ (val->x.time_value.neg ? -1 : 1);
+ break;
+ case DYN_COL_DATE:
+ *ll= (val->x.time_value.year * 10000 +
+ val->x.time_value.month * 100 +
+ val->x.time_value.day) *
+ (val->x.time_value.neg ? -1 : 1);
+ break;
+ case DYN_COL_TIME:
+ *ll= (val->x.time_value.hour * 10000 +
+ val->x.time_value.minute * 100 +
+ val->x.time_value.second) *
+ (val->x.time_value.neg ? -1 : 1);
+ break;
+ case DYN_COL_DYNCOL:
+ case DYN_COL_NULL:
+ rc= ER_DYNCOL_TRUNCATED;
+ break;
+ default:
+ return(ER_DYNCOL_FORMAT);
+ }
+ return(rc);
+}
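
The DATETIME/DATE/TIME branches above pack the fields positionally into a decimal number (YYYYMMDDhhmmss, YYYYMMDD, hhmmss). A standalone check of the arithmetic:

    #include <assert.h>

    int main(void)
    {
      unsigned long long year= 2013, month= 3, day= 15,
                         hour= 12, minute= 34, second= 56;
      unsigned long long packed= year * 10000000000ULL +
                                 month * 100000000ULL +
                                 day * 1000000 + hour * 10000 +
                                 minute * 100 + second;
      assert(packed == 20130315123456ULL);
      return 0;
    }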
- /* copy first the data that was not replaced in original packed data */
- if (start < end)
- {
- /* Add old data last in 'tmp' */
- size_t data_size=
- get_length_interval(header_base + start * entry_size,
- header_base + end * entry_size,
- header_end, offset_size, max_offset);
- if ((long) data_size < 0 ||
- data_size > max_offset - first_offset)
- {
- dynamic_column_column_free(&tmp);
- rc= ER_DYNCOL_FORMAT;
- goto end;
- }
- memcpy(tmp.str + tmp.length, (char *)header_end + first_offset,
- data_size);
- tmp.length+= data_size;
+enum enum_dyncol_func_result
+mariadb_dyncol_val_double(double *dbl, DYNAMIC_COLUMN_VALUE *val)
+{
+ enum enum_dyncol_func_result rc= ER_DYNCOL_OK;
+ *dbl= 0;
+ switch (val->type) {
+ case DYN_COL_INT:
+ *dbl= (double)val->x.long_value;
+ if (((longlong) *dbl) != val->x.long_value)
+ rc= ER_DYNCOL_TRUNCATED;
+ break;
+ case DYN_COL_UINT:
+ *dbl= (double)val->x.ulong_value;
+ if (((ulonglong) *dbl) != val->x.ulong_value)
+ rc= ER_DYNCOL_TRUNCATED;
+ break;
+ case DYN_COL_DOUBLE:
+ *dbl= val->x.double_value;
+ break;
+ case DYN_COL_STRING:
+ {
+ char *str, *end;
+    if (!(str= malloc(val->x.string.value.length + 1)))
+      return ER_DYNCOL_RESOURCE;
+ memcpy(str, val->x.string.value.str, val->x.string.value.length);
+ str[val->x.string.value.length]= '\0';
+ *dbl= strtod(str, &end);
+    if (*end != '\0')
+      rc= ER_DYNCOL_TRUNCATED;
+    free(str);
+    break;
}
+ case DYN_COL_DECIMAL:
+ if (decimal2double(&val->x.decimal.value, dbl) != E_DEC_OK)
+ rc= ER_DYNCOL_TRUNCATED;
+ break;
+ case DYN_COL_DATETIME:
+ *dbl= (double)(val->x.time_value.year * 10000000000ull +
+ val->x.time_value.month * 100000000L +
+ val->x.time_value.day * 1000000 +
+ val->x.time_value.hour * 10000 +
+ val->x.time_value.minute * 100 +
+ val->x.time_value.second) *
+ (val->x.time_value.neg ? -1 : 1);
+ break;
+ case DYN_COL_DATE:
+ *dbl= (double)(val->x.time_value.year * 10000 +
+ val->x.time_value.month * 100 +
+ val->x.time_value.day) *
+ (val->x.time_value.neg ? -1 : 1);
+ break;
+ case DYN_COL_TIME:
+ *dbl= (double)(val->x.time_value.hour * 10000 +
+ val->x.time_value.minute * 100 +
+ val->x.time_value.second) *
+ (val->x.time_value.neg ? -1 : 1);
+ break;
+ case DYN_COL_DYNCOL:
+ case DYN_COL_NULL:
+ rc= ER_DYNCOL_TRUNCATED;
+ break;
+ default:
+ return(ER_DYNCOL_FORMAT);
+ }
+ return(rc);
+}
+
+
+/**
+ Convert to JSON
+
+ @param str The packed string
+ @param json Where to put json result
+
+ @return ER_DYNCOL_* return code
+*/
- /* new data adding */
- if (i < add_column_count)
+#define JSON_STACK_PROTECTION 10
+
+static enum enum_dyncol_func_result
+mariadb_dyncol_json_internal(DYNAMIC_COLUMN *str, DYNAMIC_STRING *json,
+ uint lvl)
+{
+ DYN_HEADER header;
+ uint i;
+ enum enum_dyncol_func_result rc;
+
+ if (lvl >= JSON_STACK_PROTECTION)
+ {
+ rc= ER_DYNCOL_RESOURCE;
+ goto err;
+ }
+
+
+ if (str->length == 0)
+ return ER_DYNCOL_OK; /* no columns */
+
+ if ((rc= init_read_hdr(&header, str)) < 0)
+ goto err;
+
+ if (header.entry_size * header.column_count + FIXED_HEADER_SIZE >
+ str->length)
+ {
+ rc= ER_DYNCOL_FORMAT;
+ goto err;
+ }
+
+ rc= ER_DYNCOL_RESOURCE;
+
+ if (dynstr_append_mem(json, "{", 1))
+ goto err;
+ for (i= 0, header.entry= header.header;
+ i < header.column_count;
+ i++, header.entry+= header.entry_size)
+ {
+ DYNAMIC_COLUMN_VALUE val;
+ if (i != 0 && dynstr_append_mem(json, ",", 1))
+ goto err;
+ header.length=
+ hdr_interval_length(&header, header.entry + header.entry_size);
+ header.data= header.dtpool + header.offset;
+ /*
+      Check that the found data is within the ranges. This can happen if
+ we get data with wrong offsets.
+ */
+ if (header.length == DYNCOL_OFFSET_ERROR ||
+ header.length > INT_MAX || header.offset > header.data_size)
+ {
+ rc= ER_DYNCOL_FORMAT;
+ goto err;
+ }
+ if ((rc= dynamic_column_get_value(&header, &val)) < 0)
+ goto err;
+ if (header.format == dyncol_fmt_num)
+ {
+ uint nm= uint2korr(header.entry);
+ if (dynstr_realloc(json, DYNCOL_NUM_CHAR + 3))
+ goto err;
+ json->str[json->length++]= '"';
+ json->length+= (snprintf(json->str + json->length,
+ DYNCOL_NUM_CHAR, "%u", nm));
+ }
+ else
+ {
+ LEX_STRING name;
+ if (read_name(&header, header.entry, &name))
{
- if( plan[i].act == PLAN_ADD || plan[i].act == PLAN_REPLACE)
- {
- int2store(write, plan[i].num[0]);
- type_and_offset_store(write, new_offset_size,
- plan[i].val[0].type,
- tmp.length -
- (FIXED_HEADER_SIZE + new_header_size));
- write+= new_entry_size;
- data_store(&tmp, plan[i].val); /* Append new data */
- }
- data_delta= plan[i].ddelta;
+ rc= ER_DYNCOL_FORMAT;
+ goto err;
+ }
+ if (dynstr_realloc(json, name.length + 3))
+ goto err;
+ json->str[json->length++]= '"';
+ memcpy(json->str + json->length, name.str, name.length);
+ json->length+= name.length;
+ }
+ json->str[json->length++]= '"';
+ json->str[json->length++]= ':';
+ if (val.type == DYN_COL_DYNCOL)
+ {
+      /* we only read from it here, so we can cheat a bit */
+ DYNAMIC_COLUMN dc;
+ bzero(&dc, sizeof(dc));
+ dc.str= val.x.string.value.str;
+ dc.length= val.x.string.value.length;
+ if (mariadb_dyncol_json_internal(&dc, json, lvl + 1) < 0)
+ {
+ dc.str= NULL; dc.length= 0;
+ goto err;
}
+ dc.str= NULL; dc.length= 0;
+ }
+ else
+ {
+ if ((rc= mariadb_dyncol_val_str(json, &val,
+ &my_charset_utf8_general_ci, '"')) < 0)
+ goto err;
}
- dynamic_column_column_free(str);
- *str= tmp;
}
+ if (dynstr_append_mem(json, "}", 1))
+ {
+ rc= ER_DYNCOL_RESOURCE;
+ goto err;
+ }
+ return ER_DYNCOL_OK;
- rc= ER_DYNCOL_OK;
-
-end:
- my_free(plan);
+err:
+ json->length= 0;
return rc;
+}
-create_new_string:
- /* There is no columns from before, so let's just add the new ones */
- rc= ER_DYNCOL_OK;
- if (not_null != 0)
- rc= dynamic_column_create_many_internal(str, add_column_count,
- column_numbers, values,
- str->str == NULL);
- goto end;
+enum enum_dyncol_func_result
+mariadb_dyncol_json(DYNAMIC_COLUMN *str, DYNAMIC_STRING *json)
+{
+
+ if (init_dynamic_string(json, NULL, str->length * 2, 100))
+ return ER_DYNCOL_RESOURCE;
+
+ return mariadb_dyncol_json_internal(str, json, 1);
}
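
A caller sketch for the JSON conversion (illustrative; dynstr_free as the mysys dynamic-string destructor is an assumption, since it is not shown in this diff):

    static void dump_json(DYNAMIC_COLUMN *dyncol)
    {
      DYNAMIC_STRING json;
      if (mariadb_dyncol_json(dyncol, &json) == ER_DYNCOL_OK)
      {
        printf("%.*s\n", (int) json.length, json.str);
        dynstr_free(&json);   /* assumed destructor for DYNAMIC_STRING */
      }
    }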
/**
- Update the packed string with the given column
+  Convert the packed string to arrays of DYNAMIC_COLUMN_VALUE values and
+  LEX_STRING names
- @param str String where to write the data
- @param column_number Array of columns number
- @param values Array of columns values
+ @param str The packed string
+ @param count number of elements in the arrays
+ @param names Where to put names (should be free by user)
+ @param vals Where to put values (should be free by user)
@return ER_DYNCOL_* return code
*/
+enum enum_dyncol_func_result
+mariadb_dyncol_unpack(DYNAMIC_COLUMN *str,
+ uint *count,
+ LEX_STRING **names, DYNAMIC_COLUMN_VALUE **vals)
+{
+ DYN_HEADER header;
+ char *nm;
+ uint i;
+ enum enum_dyncol_func_result rc;
+
+ *count= 0; *names= 0; *vals= 0;
-int dynamic_column_update(DYNAMIC_COLUMN *str, uint column_nr,
- DYNAMIC_COLUMN_VALUE *value)
+ if (str->length == 0)
+ return ER_DYNCOL_OK; /* no columns */
+
+ if ((rc= init_read_hdr(&header, str)) < 0)
+ return rc;
+
+
+ if (header.entry_size * header.column_count + FIXED_HEADER_SIZE >
+ str->length)
+ return ER_DYNCOL_FORMAT;
+
+ *vals= my_malloc(sizeof(DYNAMIC_COLUMN_VALUE)* header.column_count, MYF(0));
+ if (header.format == dyncol_fmt_num)
+ {
+ *names= my_malloc(sizeof(LEX_STRING) * header.column_count +
+ DYNCOL_NUM_CHAR * header.column_count, MYF(0));
+    nm= (char *)((*names) + header.column_count); /* pool follows the array */
+ }
+ else
+ {
+ *names= my_malloc(sizeof(LEX_STRING) * header.column_count, MYF(0));
+ nm= 0;
+ }
+ if (!(*vals) || !(*names))
+ {
+ rc= ER_DYNCOL_RESOURCE;
+ goto err;
+ }
+
+ for (i= 0, header.entry= header.header;
+ i < header.column_count;
+ i++, header.entry+= header.entry_size)
+ {
+ header.length=
+ hdr_interval_length(&header, header.entry + header.entry_size);
+ header.data= header.dtpool + header.offset;
+ /*
+      Check that the found data is within the ranges. This can happen if
+ we get data with wrong offsets.
+ */
+ if (header.length == DYNCOL_OFFSET_ERROR ||
+ header.length > INT_MAX || header.offset > header.data_size)
+ {
+ rc= ER_DYNCOL_FORMAT;
+ goto err;
+ }
+ if ((rc= dynamic_column_get_value(&header, (*vals) + i)) < 0)
+ goto err;
+
+ if (header.format == dyncol_fmt_num)
+ {
+ uint num= uint2korr(header.entry);
+ (*names)[i].str= nm;
+ (*names)[i].length= snprintf(nm, DYNCOL_NUM_CHAR, "%u", num);
+ nm+= (*names)[i].length + 1;
+ }
+ else
+ {
+ if (read_name(&header, header.entry, (*names) + i))
+ {
+ rc= ER_DYNCOL_FORMAT;
+ goto err;
+ }
+ }
+ }
+
+ *count= header.column_count;
+ return ER_DYNCOL_OK;
+
+err:
+ if (*vals)
+ {
+ my_free(*vals);
+ *vals= 0;
+ }
+ if (*names)
+ {
+ my_free(*names);
+ *names= 0;
+ }
+ return rc;
+}
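
A matching caller sketch for unpack (both output arrays are single allocations released with my_free; the values may point into the packed string, so keep it alive while they are used):

    static void print_names(DYNAMIC_COLUMN *dyncol)
    {
      uint count, i;
      LEX_STRING *names;
      DYNAMIC_COLUMN_VALUE *vals;
      if (mariadb_dyncol_unpack(dyncol, &count, &names, &vals) != ER_DYNCOL_OK)
        return;
      for (i= 0; i < count; i++)
        printf("%.*s\n", (int) names[i].length, names[i].str);
      my_free(names);
      my_free(vals);
    }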
+
+/**
+  Get the number of not-NULL columns
+
+ @param str The packed string
+ @param column_count Where to put column count
+
+ @return ER_DYNCOL_* return code
+*/
+
+enum enum_dyncol_func_result
+mariadb_dyncol_column_count(DYNAMIC_COLUMN *str, uint *column_count)
{
- return dynamic_column_update_many(str, 1, &column_nr, value);
+ DYN_HEADER header;
+ enum enum_dyncol_func_result rc;
+
+ if ((rc= init_read_hdr(&header, str)) < 0)
+ return rc;
+ *column_count= header.column_count;
+ return rc;
}
diff --git a/mysys/mf_fn_ext.c b/mysys/mf_fn_ext.c
index 47fc67cabbd..cbf0d5dd9e4 100644
--- a/mysys/mf_fn_ext.c
+++ b/mysys/mf_fn_ext.c
@@ -52,3 +52,40 @@ char *fn_ext(const char *name)
pos=strchr(gpos,FN_EXTCHAR);
DBUG_RETURN((char*) (pos ? pos : strend(gpos)));
} /* fn_ext */
+
+
+/*
+ Return a pointer to the extension of the filename.
+
+ SYNOPSIS
+    fn_ext2()
+ name Name of file
+
+ DESCRIPTION
+ The extension is defined as everything after the last extension character
+ (normally '.') after the directory name.
+
+ RETURN VALUES
+    Pointer to the extension character. If there isn't any extension,
+ points at the end ASCII(0) of the filename.
+*/
+
+char *fn_ext2(const char *name)
+{
+ register const char *pos, *gpos;
+ DBUG_ENTER("fn_ext");
+ DBUG_PRINT("mfunkt",("name: '%s'",name));
+
+#if defined(FN_DEVCHAR) || defined(BACKSLASH_MBTAIL)
+ {
+ char buff[FN_REFLEN];
+ size_t res_length;
+ gpos= name+ dirname_part(buff,(char*) name, &res_length);
+ }
+#else
+ if (!(gpos= strrchr(name, FN_LIBCHAR)))
+ gpos= name;
+#endif
+ pos=strrchr(gpos,FN_EXTCHAR);
+ DBUG_RETURN((char*) (pos ? pos : strend(gpos)));
+} /* fn_ext2 */
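
A standalone sketch of what fn_ext2() computes on the plain-Unix path: the extension starts at the last '.' after the last path separator, where fn_ext() uses the first '.' instead:

    #include <stdio.h>
    #include <string.h>

    static const char *ext2(const char *name)
    {
      const char *gpos= strrchr(name, '/');
      const char *pos;
      gpos= gpos ? gpos + 1 : name;          /* skip the directory part */
      pos= strrchr(gpos, '.');               /* LAST extension character */
      return pos ? pos : gpos + strlen(gpos);
    }

    int main(void)
    {
      printf("%s\n", ext2("/tmp/archive.tar.gz"));  /* prints ".gz" */
      return 0;
    }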
diff --git a/mysys/mf_radix.c b/mysys/mf_radix.c
index 582ca76b8f8..2df1220acdd 100644
--- a/mysys/mf_radix.c
+++ b/mysys/mf_radix.c
@@ -25,6 +25,11 @@
/* Radixsort */
+my_bool radixsort_is_appliccable(uint n_items, size_t size_of_element)
+{
+ return size_of_element <= 20 && n_items >= 1000 && n_items < 100000;
+}
+
void radixsort_for_str_ptr(uchar **base, uint number_of_elements, size_t size_of_element, uchar **buffer)
{
uchar **end,**ptr,**buffer_ptr;
diff --git a/mysys/mf_sort.c b/mysys/mf_sort.c
index 9ef02787716..b2c58c26624 100644
--- a/mysys/mf_sort.c
+++ b/mysys/mf_sort.c
@@ -23,7 +23,7 @@ void my_string_ptr_sort(uchar *base, uint items, size_t size)
#if INT_MAX > 65536L
uchar **ptr=0;
- if (size <= 20 && items >= 1000 && items < 100000 &&
+ if (radixsort_is_appliccable(items, size) &&
(ptr= (uchar**) my_malloc(items*sizeof(char*),MYF(0))))
{
radixsort_for_str_ptr((uchar**) base,items,size,ptr);
diff --git a/mysys/mf_tempdir.c b/mysys/mf_tempdir.c
index eceb90bb619..f8450a100e5 100644
--- a/mysys/mf_tempdir.c
+++ b/mysys/mf_tempdir.c
@@ -30,7 +30,7 @@ my_bool init_tmpdir(MY_TMPDIR *tmpdir, const char *pathlist)
DBUG_PRINT("enter", ("pathlist: %s", pathlist ? pathlist : "NULL"));
mysql_mutex_init(key_TMPDIR_mutex, &tmpdir->mutex, MY_MUTEX_INIT_FAST);
- if (my_init_dynamic_array(&tmpdir->full_list, sizeof(char*), 1, 5))
+ if (my_init_dynamic_array(&tmpdir->full_list, sizeof(char*), 1, 5, MYF(0)))
goto err;
if (!pathlist || !pathlist[0])
{
diff --git a/mysys/my_alloc.c b/mysys/my_alloc.c
index 1054db6cee4..6c8a73df4a7 100644
--- a/mysys/my_alloc.c
+++ b/mysys/my_alloc.c
@@ -22,6 +22,10 @@
#undef EXTRA_DEBUG
#define EXTRA_DEBUG
+/* MY_THREAD_SPECIFIC flag packed into MEM_ROOT -> block_size */
+
+#define MALLOC_FLAG(A) ((A & 1) ? MY_THREAD_SPECIFIC : 0)
+
/*
Initialize memory root
@@ -34,6 +38,7 @@
should be no less than ALLOC_ROOT_MIN_BLOCK_SIZE)
pre_alloc_size - if non-0, then size of block that should be
pre-allocated during memory root initialization.
+ my_flags MY_THREAD_SPECIFIC flag for my_malloc
DESCRIPTION
This function prepares memory root for further use, sets initial size of
@@ -41,17 +46,24 @@
Altough error can happen during execution of this function if
pre_alloc_size is non-0 it won't be reported. Instead it will be
reported as error in first alloc_root() on this memory root.
+
+ We don't want to change the structure size for MEM_ROOT.
+  Because of this, we store the MY_THREAD_SPECIFIC flag in the low bit of
+  block_size.
*/
void init_alloc_root(MEM_ROOT *mem_root, size_t block_size,
- size_t pre_alloc_size __attribute__((unused)))
+ size_t pre_alloc_size __attribute__((unused)),
+ myf my_flags)
{
DBUG_ENTER("init_alloc_root");
DBUG_PRINT("enter",("root: 0x%lx", (long) mem_root));
mem_root->free= mem_root->used= mem_root->pre_alloc= 0;
mem_root->min_malloc= 32;
- mem_root->block_size= block_size - ALLOC_ROOT_MIN_BLOCK_SIZE;
+ mem_root->block_size= (block_size - ALLOC_ROOT_MIN_BLOCK_SIZE) & ~1;
+ if (test(my_flags & MY_THREAD_SPECIFIC))
+ mem_root->block_size|= 1;
+
mem_root->error_handler= 0;
mem_root->block_num= 4; /* We shift this with >>2 */
mem_root->first_block_usage= 0;
@@ -61,7 +73,7 @@ void init_alloc_root(MEM_ROOT *mem_root, size_t block_size,
{
if ((mem_root->free= mem_root->pre_alloc=
(USED_MEM*) my_malloc(pre_alloc_size+ ALIGN_SIZE(sizeof(USED_MEM)),
- MYF(0))))
+ MYF(my_flags))))
{
mem_root->free->size= pre_alloc_size+ALIGN_SIZE(sizeof(USED_MEM));
mem_root->free->left= pre_alloc_size;
@@ -72,7 +84,6 @@ void init_alloc_root(MEM_ROOT *mem_root, size_t block_size,
DBUG_VOID_RETURN;
}
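
The MY_THREAD_SPECIFIC trick above works because block_size is rounded down to an even value first, leaving bit 0 free to carry the flag. In isolation (standalone sketch):

    #include <assert.h>

    #define FLAG_BIT 1UL

    static unsigned long pack(unsigned long block_size, int thread_specific)
    {
      /* block_size rounded down to even; bit 0 carries the flag */
      return (block_size & ~FLAG_BIT) | (thread_specific ? FLAG_BIT : 0);
    }

    int main(void)
    {
      unsigned long v= pack(4096, 1);
      assert((v & ~FLAG_BIT) == 4096);  /* size recovered */
      assert((v & FLAG_BIT) == 1);      /* flag recovered */
      return 0;
    }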
-
/*
SYNOPSIS
reset_root_defaults()
@@ -95,7 +106,8 @@ void reset_root_defaults(MEM_ROOT *mem_root, size_t block_size,
{
DBUG_ASSERT(alloc_root_inited(mem_root));
- mem_root->block_size= block_size - ALLOC_ROOT_MIN_BLOCK_SIZE;
+ mem_root->block_size= (((block_size - ALLOC_ROOT_MIN_BLOCK_SIZE) & ~1) |
+ (mem_root->block_size & 1));
#if !(defined(HAVE_valgrind) && defined(EXTRA_DEBUG))
if (pre_alloc_size)
{
@@ -126,7 +138,9 @@ void reset_root_defaults(MEM_ROOT *mem_root, size_t block_size,
prev= &mem->next;
}
/* Allocate new prealloc block and add it to the end of free list */
- if ((mem= (USED_MEM *) my_malloc(size, MYF(0))))
+ if ((mem= (USED_MEM *) my_malloc(size,
+ MYF(MALLOC_FLAG(mem_root->
+ block_size)))))
{
mem->size= size;
mem->left= pre_alloc_size;
@@ -163,7 +177,9 @@ void *alloc_root(MEM_ROOT *mem_root, size_t length)
});
length+=ALIGN_SIZE(sizeof(USED_MEM));
- if (!(next = (USED_MEM*) my_malloc(length,MYF(MY_WME | ME_FATALERROR))))
+ if (!(next = (USED_MEM*) my_malloc(length,
+ MYF(MY_WME | ME_FATALERROR |
+ MALLOC_FLAG(mem_root->block_size)))))
{
if (mem_root->error_handler)
(*mem_root->error_handler)();
@@ -210,11 +226,14 @@ void *alloc_root(MEM_ROOT *mem_root, size_t length)
}
if (! next)
{ /* Time to alloc new block */
- block_size= mem_root->block_size * (mem_root->block_num >> 2);
+ block_size= (mem_root->block_size & ~1) * (mem_root->block_num >> 2);
get_size= length+ALIGN_SIZE(sizeof(USED_MEM));
get_size= max(get_size, block_size);
- if (!(next = (USED_MEM*) my_malloc(get_size,MYF(MY_WME | ME_FATALERROR))))
+ if (!(next = (USED_MEM*) my_malloc(get_size,
+ MYF(MY_WME | ME_FATALERROR |
+ MALLOC_FLAG(mem_root->
+ block_size)))))
{
if (mem_root->error_handler)
(*mem_root->error_handler)();
diff --git a/mysys/my_chmod.c b/mysys/my_chmod.c
index afdea758833..91fd51b47d2 100644
--- a/mysys/my_chmod.c
+++ b/mysys/my_chmod.c
@@ -34,7 +34,7 @@
int my_chmod(const char *name, mode_t mode, myf my_flags)
{
DBUG_ENTER("my_chmod");
- DBUG_PRINT("my",("name: %s mode: %lu flags: %d", name, (ulong) mode,
+ DBUG_PRINT("my",("name: %s mode: %lu flags: %lu", name, (ulong) mode,
my_flags));
if (chmod(name, mode))
diff --git a/mysys/my_chsize.c b/mysys/my_chsize.c
index 63964916d6f..51da6be7935 100644
--- a/mysys/my_chsize.c
+++ b/mysys/my_chsize.c
@@ -43,7 +43,7 @@ int my_chsize(File fd, my_off_t newlength, int filler, myf MyFlags)
my_off_t oldsize;
uchar buff[IO_SIZE];
DBUG_ENTER("my_chsize");
- DBUG_PRINT("my",("fd: %d length: %lu MyFlags: %d",fd,(ulong) newlength,
+ DBUG_PRINT("my",("fd: %d length: %lu MyFlags: %lu",fd,(ulong) newlength,
MyFlags));
if ((oldsize= my_seek(fd, 0L, MY_SEEK_END, MYF(MY_WME+MY_FAE))) == newlength)
diff --git a/mysys/my_copy.c b/mysys/my_copy.c
index 21de1e953a2..58cacb9639d 100644
--- a/mysys/my_copy.c
+++ b/mysys/my_copy.c
@@ -61,7 +61,7 @@ int my_copy(const char *from, const char *to, myf MyFlags)
MY_STAT stat_buff,new_stat_buff;
my_bool file_created= 0;
DBUG_ENTER("my_copy");
- DBUG_PRINT("my",("from %s to %s MyFlags %d", from, to, MyFlags));
+ DBUG_PRINT("my",("from %s to %s MyFlags %lu", from, to, MyFlags));
from_file=to_file= -1;
DBUG_ASSERT(!(MyFlags & (MY_FNABP | MY_NABP))); /* for my_read/my_write */
diff --git a/mysys/my_create.c b/mysys/my_create.c
index 2e4e8eb1af2..51de343d4a1 100644
--- a/mysys/my_create.c
+++ b/mysys/my_create.c
@@ -38,7 +38,7 @@ File my_create(const char *FileName, int CreateFlags, int access_flags,
{
int fd, rc;
DBUG_ENTER("my_create");
- DBUG_PRINT("my",("Name: '%s' CreateFlags: %d AccessFlags: %d MyFlags: %d",
+ DBUG_PRINT("my",("Name: '%s' CreateFlags: %d AccessFlags: %d MyFlags: %lu",
FileName, CreateFlags, access_flags, MyFlags));
#if defined(_WIN32)
fd= my_win_open(FileName, access_flags | O_CREAT);
diff --git a/mysys/my_delete.c b/mysys/my_delete.c
index cf9573f592b..0655501aa16 100644
--- a/mysys/my_delete.c
+++ b/mysys/my_delete.c
@@ -21,7 +21,7 @@ int my_delete(const char *name, myf MyFlags)
{
int err;
DBUG_ENTER("my_delete");
- DBUG_PRINT("my",("name %s MyFlags %d", name, MyFlags));
+ DBUG_PRINT("my",("name %s MyFlags %lu", name, MyFlags));
if ((err = unlink(name)) == -1)
{
diff --git a/mysys/my_error.c b/mysys/my_error.c
index 3302add688b..08c67412fe1 100644
--- a/mysys/my_error.c
+++ b/mysys/my_error.c
@@ -75,7 +75,7 @@ void my_error(int nr, myf MyFlags, ...)
va_list args;
char ebuff[ERRMSGSIZE];
DBUG_ENTER("my_error");
- DBUG_PRINT("my", ("nr: %d MyFlags: %d errno: %d", nr, MyFlags, errno));
+ DBUG_PRINT("my", ("nr: %d MyFlags: %lu errno: %d", nr, MyFlags, errno));
/* Search for the error messages array, which could contain the message. */
for (meh_p= my_errmsgs_list; meh_p; meh_p= meh_p->meh_next)
@@ -114,7 +114,7 @@ void my_printf_error(uint error, const char *format, myf MyFlags, ...)
va_list args;
char ebuff[ERRMSGSIZE];
DBUG_ENTER("my_printf_error");
- DBUG_PRINT("my", ("nr: %d MyFlags: %d errno: %d format: %s",
+ DBUG_PRINT("my", ("nr: %d MyFlags: %lu errno: %d format: %s",
error, MyFlags, errno, format));
va_start(args,MyFlags);
@@ -140,7 +140,7 @@ void my_printv_error(uint error, const char *format, myf MyFlags, va_list ap)
{
char ebuff[ERRMSGSIZE];
DBUG_ENTER("my_printv_error");
- DBUG_PRINT("my", ("nr: %d MyFlags: %d errno: %d format: %s",
+ DBUG_PRINT("my", ("nr: %d MyFlags: %lu errno: %d format: %s",
error, MyFlags, errno, format));
(void) my_vsnprintf(ebuff, sizeof(ebuff), format, ap);
diff --git a/mysys/my_fopen.c b/mysys/my_fopen.c
index 54469d2c05a..8a30dd6c10b 100644
--- a/mysys/my_fopen.c
+++ b/mysys/my_fopen.c
@@ -44,7 +44,7 @@ FILE *my_fopen(const char *filename, int flags, myf MyFlags)
FILE *fd;
char type[5];
DBUG_ENTER("my_fopen");
- DBUG_PRINT("my",("Name: '%s' flags: %d MyFlags: %d",
+ DBUG_PRINT("my",("Name: '%s' flags: %d MyFlags: %lu",
filename, flags, MyFlags));
make_ftype(type,flags);
@@ -229,7 +229,7 @@ int my_fclose(FILE *fd, myf MyFlags)
{
int err,file;
DBUG_ENTER("my_fclose");
- DBUG_PRINT("my",("stream: 0x%lx MyFlags: %d", (long) fd, MyFlags));
+ DBUG_PRINT("my",("stream: 0x%lx MyFlags: %lu", (long) fd, MyFlags));
mysql_mutex_lock(&THR_LOCK_open);
file= my_fileno(fd);
@@ -265,7 +265,7 @@ FILE *my_fdopen(File Filedes, const char *name, int Flags, myf MyFlags)
FILE *fd;
char type[5];
DBUG_ENTER("my_fdopen");
- DBUG_PRINT("my",("fd: %d Flags: %d MyFlags: %d",
+ DBUG_PRINT("my",("fd: %d Flags: %d MyFlags: %lu",
Filedes, Flags, MyFlags));
make_ftype(type,Flags);
diff --git a/mysys/my_fstream.c b/mysys/my_fstream.c
index e758609741b..de752fa149f 100644
--- a/mysys/my_fstream.c
+++ b/mysys/my_fstream.c
@@ -46,7 +46,7 @@ size_t my_fread(FILE *stream, uchar *Buffer, size_t Count, myf MyFlags)
{
size_t readbytes;
DBUG_ENTER("my_fread");
- DBUG_PRINT("my",("stream: 0x%lx Buffer: 0x%lx Count: %u MyFlags: %d",
+ DBUG_PRINT("my",("stream: 0x%lx Buffer: 0x%lx Count: %u MyFlags: %lu",
(long) stream, (long) Buffer, (uint) Count, MyFlags));
if ((readbytes= fread(Buffer, sizeof(char), Count, stream)) != Count)
@@ -94,7 +94,7 @@ size_t my_fwrite(FILE *stream, const uchar *Buffer, size_t Count, myf MyFlags)
uint errors;
#endif
DBUG_ENTER("my_fwrite");
- DBUG_PRINT("my",("stream: 0x%lx Buffer: 0x%lx Count: %u MyFlags: %d",
+ DBUG_PRINT("my",("stream: 0x%lx Buffer: 0x%lx Count: %u MyFlags: %lu",
(long) stream, (long) Buffer, (uint) Count, MyFlags));
#if !defined(NO_BACKGROUND) && defined(USE_MY_STREAM)
@@ -163,7 +163,7 @@ my_off_t my_fseek(FILE *stream, my_off_t pos, int whence,
myf MyFlags __attribute__((unused)))
{
DBUG_ENTER("my_fseek");
- DBUG_PRINT("my",("stream: 0x%lx pos: %lu whence: %d MyFlags: %d",
+ DBUG_PRINT("my",("stream: 0x%lx pos: %lu whence: %d MyFlags: %lu",
(long) stream, (long) pos, whence, MyFlags));
DBUG_RETURN(fseek(stream, (off_t) pos, whence) ?
MY_FILEPOS_ERROR : (my_off_t) ftell(stream));
@@ -176,7 +176,7 @@ my_off_t my_ftell(FILE *stream, myf MyFlags __attribute__((unused)))
{
off_t pos;
DBUG_ENTER("my_ftell");
- DBUG_PRINT("my",("stream: 0x%lx MyFlags: %d", (long) stream, MyFlags));
+ DBUG_PRINT("my",("stream: 0x%lx MyFlags: %lu", (long) stream, MyFlags));
pos=ftell(stream);
DBUG_PRINT("exit",("ftell: %lu",(ulong) pos));
DBUG_RETURN((my_off_t) pos);
diff --git a/mysys/my_getwd.c b/mysys/my_getwd.c
index 444ed4273b5..79ec58d3c5c 100644
--- a/mysys/my_getwd.c
+++ b/mysys/my_getwd.c
@@ -48,7 +48,7 @@ int my_getwd(char * buf, size_t size, myf MyFlags)
{
char * pos;
DBUG_ENTER("my_getwd");
- DBUG_PRINT("my",("buf: 0x%lx size: %u MyFlags %d",
+ DBUG_PRINT("my",("buf: 0x%lx size: %u MyFlags %lu",
(long) buf, (uint) size, MyFlags));
if (size < 1)
@@ -95,7 +95,7 @@ int my_setwd(const char *dir, myf MyFlags)
size_t length;
char *start, *pos;
DBUG_ENTER("my_setwd");
- DBUG_PRINT("my",("dir: '%s' MyFlags %d", dir, MyFlags));
+ DBUG_PRINT("my",("dir: '%s' MyFlags %lu", dir, MyFlags));
start=(char *) dir;
if (! dir[0] || (dir[0] == FN_LIBCHAR && dir[1] == 0))
diff --git a/mysys/my_lib.c b/mysys/my_lib.c
index b42a3d55d93..9c8655338c1 100644
--- a/mysys/my_lib.c
+++ b/mysys/my_lib.c
@@ -100,7 +100,7 @@ MY_DIR *my_dir(const char *path, myf MyFlags)
char dirent_tmp[sizeof(struct dirent)+_POSIX_PATH_MAX+1];
DBUG_ENTER("my_dir");
- DBUG_PRINT("my",("path: '%s' MyFlags: %d",path,MyFlags));
+ DBUG_PRINT("my",("path: '%s' MyFlags: %lu",path,MyFlags));
#if !defined(HAVE_READDIR_R)
mysql_mutex_lock(&THR_LOCK_open);
@@ -122,12 +122,14 @@ MY_DIR *my_dir(const char *path, myf MyFlags)
ALIGN_SIZE(sizeof(DYNAMIC_ARRAY)));
if (my_init_dynamic_array(dir_entries_storage, sizeof(FILEINFO),
- ENTRIES_START_SIZE, ENTRIES_INCREMENT))
+ ENTRIES_START_SIZE, ENTRIES_INCREMENT,
+ MYF(MyFlags)))
{
my_free(buffer);
goto error;
}
- init_alloc_root(names_storage, NAMES_START_SIZE, NAMES_START_SIZE);
+ init_alloc_root(names_storage, NAMES_START_SIZE, NAMES_START_SIZE,
+ MYF(MyFlags));
/* MY_DIR structure is allocated and completely initialized at this point */
result= (MY_DIR*)buffer;
@@ -263,12 +265,13 @@ MY_DIR *my_dir(const char *path, myf MyFlags)
ALIGN_SIZE(sizeof(DYNAMIC_ARRAY)));
if (my_init_dynamic_array(dir_entries_storage, sizeof(FILEINFO),
- ENTRIES_START_SIZE, ENTRIES_INCREMENT))
+ ENTRIES_START_SIZE, ENTRIES_INCREMENT,
+ MYF(MyFlags)))
{
my_free(buffer);
goto error;
}
- init_alloc_root(names_storage, NAMES_START_SIZE, NAMES_START_SIZE);
+ init_alloc_root(names_storage, NAMES_START_SIZE, NAMES_START_SIZE, MYF(MyFlags));
/* MY_DIR structure is allocated and completely initialized at this point */
result= (MY_DIR*)buffer;
@@ -383,7 +386,7 @@ int my_fstat(File Filedes, MY_STAT *stat_area,
myf MyFlags __attribute__((unused)))
{
DBUG_ENTER("my_fstat");
- DBUG_PRINT("my",("fd: %d MyFlags: %d", Filedes, MyFlags));
+ DBUG_PRINT("my",("fd: %d MyFlags: %lu", Filedes, MyFlags));
#ifdef _WIN32
DBUG_RETURN(my_win_fstat(Filedes, stat_area));
#else
@@ -396,7 +399,7 @@ MY_STAT *my_stat(const char *path, MY_STAT *stat_area, myf my_flags)
{
int m_used;
DBUG_ENTER("my_stat");
- DBUG_PRINT("my", ("path: '%s' stat_area: 0x%lx MyFlags: %d", path,
+ DBUG_PRINT("my", ("path: '%s' stat_area: 0x%lx MyFlags: %lu", path,
(long) stat_area, my_flags));
if ((m_used= (stat_area == NULL)))
diff --git a/mysys/my_lock.c b/mysys/my_lock.c
index 54ec3838b58..0abbc6c3084 100644
--- a/mysys/my_lock.c
+++ b/mysys/my_lock.c
@@ -144,7 +144,7 @@ int my_lock(File fd, int locktype, my_off_t start, my_off_t length,
#endif
DBUG_ENTER("my_lock");
- DBUG_PRINT("my",("fd: %d Op: %d start: %ld Length: %ld MyFlags: %d",
+ DBUG_PRINT("my",("fd: %d Op: %d start: %ld Length: %ld MyFlags: %lu",
fd,locktype,(long) start,(long) length,MyFlags));
if (my_disable_locking && ! (MyFlags & MY_FORCE_LOCK))
DBUG_RETURN(0);
diff --git a/mysys/my_malloc.c b/mysys/my_malloc.c
index 1a9caf71380..8d1a3d926d6 100644
--- a/mysys/my_malloc.c
+++ b/mysys/my_malloc.c
@@ -18,6 +18,60 @@
#include "mysys_err.h"
#include <m_string.h>
+/* If we have our own safemalloc (for debugging) */
+#if defined(SAFEMALLOC)
+#define MALLOC_SIZE_AND_FLAG(p,b) sf_malloc_usable_size(p,b)
+#define MALLOC_PREFIX_SIZE 0
+#define MALLOC_STORE_SIZE(a,b,c,d)
+#define MALLOC_FIX_POINTER_FOR_FREE(a) a
+#else
+/*
+ * We use a double as the prefix size, as this guarantees correct
+ * alignment on all platforms and keeps memcpy(), memcmp() etc.
+ * efficient.
+ */
+#define MALLOC_PREFIX_SIZE (sizeof(double))
+#define MALLOC_SIZE(p) (*(size_t*) ((char*)(p) - MALLOC_PREFIX_SIZE))
+#define MALLOC_STORE_SIZE(p, type_of_p, size, flag) \
+{\
+ *(size_t*) p= (size) | (flag); \
+ (p)= (type_of_p) (((char*) (p)) + MALLOC_PREFIX_SIZE); \
+}
+static inline size_t malloc_size_and_flag(void *p, my_bool *is_thread_specific)
+{
+ size_t size= MALLOC_SIZE(p);
+ *is_thread_specific= (size & 1);
+ return size & ~ (ulonglong) 1;
+}
+#define MALLOC_SIZE_AND_FLAG(p,b) malloc_size_and_flag(p, b)
+#define MALLOC_FIX_POINTER_FOR_FREE(p) (((char*) (p)) - MALLOC_PREFIX_SIZE)
+#endif /* SAFEMALLOC */
+
+static MALLOC_SIZE_CB malloc_size_cb_func= NULL;
+
+/**
+ Inform application that memory usage has changed
+
+ @param size Size of memory segment allocated or freed
+ @param is_thread_specific 1 if thread specific (allocated with
+ MY_THREAD_SPECIFIC), 0 if system specific.
+
+ The type of size is long long rather than size_t, so that negative values
+ can be passed to decrement the memory usage
+*/
+
+static void update_malloc_size(long long size, my_bool is_thread_specific)
+{
+ if (malloc_size_cb_func)
+ malloc_size_cb_func(size, is_thread_specific);
+}
+
+void set_malloc_size_cb(MALLOC_SIZE_CB func)
+{
+ malloc_size_cb_func= func;
+}
+
+
/**
Allocate a sized block of memory.
@@ -30,7 +84,9 @@ void *my_malloc(size_t size, myf my_flags)
{
void* point;
DBUG_ENTER("my_malloc");
- DBUG_PRINT("my",("size: %lu my_flags: %d", (ulong) size, my_flags));
+ DBUG_PRINT("my",("size: %lu my_flags: %lu", (ulong) size, my_flags));
+ compile_time_assert(sizeof(size_t) <= sizeof(double));
+
if (!(my_flags & (MY_WME | MY_FAE)))
my_flags|= my_global_flags;
@@ -38,12 +94,9 @@ void *my_malloc(size_t size, myf my_flags)
if (!size)
size=1;
- point= sf_malloc(size);
- DBUG_EXECUTE_IF("simulate_out_of_memory",
- {
- my_free(point);
- point= NULL;
- });
+ /* Align the size so that the flag can be stored in its lowest bit */
+ size= ALIGN_SIZE(size);
+ point= sf_malloc(size + MALLOC_PREFIX_SIZE, my_flags);
if (point == NULL)
{
@@ -59,8 +112,20 @@ void *my_malloc(size_t size, myf my_flags)
if (my_flags & MY_FAE)
exit(1);
}
- else if (my_flags & MY_ZEROFILL)
- bzero(point, size);
+ else
+ {
+ MALLOC_STORE_SIZE(point, void*, size, test(my_flags & MY_THREAD_SPECIFIC));
+ update_malloc_size(size + MALLOC_PREFIX_SIZE,
+ test(my_flags & MY_THREAD_SPECIFIC));
+ DBUG_EXECUTE_IF("simulate_out_of_memory",
+ {
+ /* my_free() handles memory accounting */
+ my_free(point);
+ point= NULL;
+ });
+ if (my_flags & MY_ZEROFILL)
+ bzero(point, size);
+ }
DBUG_PRINT("exit",("ptr: %p", point));
DBUG_RETURN(point);
}
@@ -79,23 +144,53 @@ void *my_malloc(size_t size, myf my_flags)
void *my_realloc(void *oldpoint, size_t size, myf my_flags)
{
void *point;
+ size_t old_size;
+ my_bool old_flags;
DBUG_ENTER("my_realloc");
- DBUG_PRINT("my",("ptr: %p size: %lu my_flags: %d", oldpoint,
+ DBUG_PRINT("my",("ptr: %p size: %lu my_flags: %lu", oldpoint,
(ulong) size, my_flags));
DBUG_ASSERT(size > 0);
if (!oldpoint && (my_flags & MY_ALLOW_ZERO_PTR))
DBUG_RETURN(my_malloc(size, my_flags));
- if ((point= sf_realloc(oldpoint, size)) == NULL)
+
+ size= ALIGN_SIZE(size);
+ old_size= MALLOC_SIZE_AND_FLAG(oldpoint, &old_flags);
+ /*
+ Check that the new and old areas have the same MY_THREAD_SPECIFIC flag,
+ unless MY_THREAD_MOVE is given
+ */
+ DBUG_ASSERT((test(my_flags & MY_THREAD_SPECIFIC) == old_flags) ||
+ (my_flags & MY_THREAD_MOVE));
+ if ((point= sf_realloc(MALLOC_FIX_POINTER_FOR_FREE(oldpoint),
+ size + MALLOC_PREFIX_SIZE, my_flags)) == NULL)
{
if (my_flags & MY_FREE_ON_ERROR)
+ {
+ /* my_free will take care of size accounting */
my_free(oldpoint);
+ oldpoint= 0;
+ }
if (my_flags & MY_HOLD_ON_ERROR)
DBUG_RETURN(oldpoint);
my_errno=errno;
if (my_flags & (MY_FAE+MY_WME))
my_error(EE_OUTOFMEMORY, MYF(ME_BELL+ME_WAITTANG), size);
}
+ else
+ {
+ MALLOC_STORE_SIZE(point, void*, size, test(my_flags & MY_THREAD_SPECIFIC));
+ if (test(my_flags & MY_THREAD_SPECIFIC) != old_flags)
+ {
+ /* memory moved between system and thread specific */
+ update_malloc_size(-(longlong) old_size - MALLOC_PREFIX_SIZE, old_flags);
+ update_malloc_size((longlong) size + MALLOC_PREFIX_SIZE,
+ my_flags & MY_THREAD_SPECIFIC);
+ }
+ else
+ update_malloc_size((longlong)size - (longlong)old_size, old_flags);
+ }
+
DBUG_PRINT("exit",("ptr: %p", point));
DBUG_RETURN(point);
}
@@ -112,7 +207,14 @@ void my_free(void *ptr)
{
DBUG_ENTER("my_free");
DBUG_PRINT("my",("ptr: %p", ptr));
- sf_free(ptr);
+ if (ptr)
+ {
+ size_t old_size;
+ my_bool old_flags;
+ old_size= MALLOC_SIZE_AND_FLAG(ptr, &old_flags);
+ update_malloc_size(- (longlong) old_size - MALLOC_PREFIX_SIZE, old_flags);
+ sf_free(MALLOC_FIX_POINTER_FOR_FREE(ptr));
+ }
DBUG_VOID_RETURN;
}
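
Outside of SAFEMALLOC builds, my_malloc() now prepends a double-aligned prefix holding the block size with the thread-specific flag in its lowest bit, and reports every change to the registered callback. A self-contained sketch of that bookkeeping, with plain malloc()/free() standing in for sf_malloc()/sf_free():

#include <stdlib.h>

#define PREFIX_SIZE sizeof(double)  /* a double keeps the data area aligned */

typedef void (*size_cb)(long long size, int is_thread_specific);
static size_cb size_callback;       /* set via a set_malloc_size_cb() analogue */

static void *prefixed_malloc(size_t size, int thread_specific)
{
  char *p;
  size= (size + 7) & ~(size_t) 7;   /* align so bit 0 of the size stays free */
  if (!(p= malloc(size + PREFIX_SIZE)))
    return NULL;
  *(size_t*) p= size | (size_t) (thread_specific ? 1 : 0);
  if (size_callback)
    size_callback((long long) (size + PREFIX_SIZE), thread_specific);
  return p + PREFIX_SIZE;           /* the caller never sees the prefix */
}

static void prefixed_free(void *ptr)
{
  if (ptr)
  {
    char *p= (char*) ptr - PREFIX_SIZE;
    size_t stored= *(size_t*) p;
    if (size_callback)              /* a negative size decrements the usage */
      size_callback(-(long long) ((stored & ~(size_t) 1) + PREFIX_SIZE),
                    (int) (stored & 1));
    free(p);
  }
}
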
diff --git a/mysys/my_open.c b/mysys/my_open.c
index 645d6709358..5263ba4b5c8 100644
--- a/mysys/my_open.c
+++ b/mysys/my_open.c
@@ -39,7 +39,7 @@ File my_open(const char *FileName, int Flags, myf MyFlags)
{
File fd;
DBUG_ENTER("my_open");
- DBUG_PRINT("my",("Name: '%s' Flags: %d MyFlags: %d",
+ DBUG_PRINT("my",("Name: '%s' Flags: %d MyFlags: %lu",
FileName, Flags, MyFlags));
if (!(MyFlags & (MY_WME | MY_FAE | MY_FFNF)))
MyFlags|= my_global_flags;
@@ -71,7 +71,7 @@ int my_close(File fd, myf MyFlags)
{
int err;
DBUG_ENTER("my_close");
- DBUG_PRINT("my",("fd: %d MyFlags: %d",fd, MyFlags));
+ DBUG_PRINT("my",("fd: %d MyFlags: %lu",fd, MyFlags));
if (!(MyFlags & (MY_WME | MY_FAE)))
MyFlags|= my_global_flags;
diff --git a/mysys/my_pread.c b/mysys/my_pread.c
index 51b4c5f5599..745cde9ec41 100644
--- a/mysys/my_pread.c
+++ b/mysys/my_pread.c
@@ -52,7 +52,7 @@ size_t my_pread(File Filedes, uchar *Buffer, size_t Count, my_off_t offset,
DBUG_ENTER("my_pread");
- DBUG_PRINT("my",("fd: %d Seek: %llu Buffer: %p Count: %lu MyFlags: %d",
+ DBUG_PRINT("my",("fd: %d Seek: %llu Buffer: %p Count: %lu MyFlags: %lu",
Filedes, (ulonglong)offset, Buffer, (ulong)Count, MyFlags));
if (!(MyFlags & (MY_WME | MY_FAE | MY_FNABP)))
@@ -130,7 +130,7 @@ size_t my_pwrite(int Filedes, const uchar *Buffer, size_t Count,
size_t writtenbytes, written;
uint errors;
DBUG_ENTER("my_pwrite");
- DBUG_PRINT("my",("fd: %d Seek: %llu Buffer: %p Count: %lu MyFlags: %d",
+ DBUG_PRINT("my",("fd: %d Seek: %llu Buffer: %p Count: %lu MyFlags: %lu",
Filedes, (ulonglong)offset, Buffer, (ulong)Count, MyFlags));
errors= 0;
written= 0;
diff --git a/mysys/my_read.c b/mysys/my_read.c
index 0afbf3773f3..66b7f6d353d 100644
--- a/mysys/my_read.c
+++ b/mysys/my_read.c
@@ -37,7 +37,7 @@ size_t my_read(File Filedes, uchar *Buffer, size_t Count, myf MyFlags)
{
size_t readbytes, save_count;
DBUG_ENTER("my_read");
- DBUG_PRINT("my",("fd: %d Buffer: %p Count: %lu MyFlags: %d",
+ DBUG_PRINT("my",("fd: %d Buffer: %p Count: %lu MyFlags: %lu",
Filedes, Buffer, (ulong) Count, MyFlags));
save_count= Count;
if (!(MyFlags & (MY_WME | MY_FAE | MY_FNABP)))
diff --git a/mysys/my_redel.c b/mysys/my_redel.c
index d096a5c071c..3c68e69b46a 100644
--- a/mysys/my_redel.c
+++ b/mysys/my_redel.c
@@ -46,7 +46,7 @@ int my_redel(const char *org_name, const char *tmp_name,
{
int error=1;
DBUG_ENTER("my_redel");
- DBUG_PRINT("my",("org_name: '%s' tmp_name: '%s' MyFlags: %d",
+ DBUG_PRINT("my",("org_name: '%s' tmp_name: '%s' MyFlags: %lu",
org_name,tmp_name,MyFlags));
if (my_copystat(org_name,tmp_name,MyFlags) < 0)
diff --git a/mysys/my_rename.c b/mysys/my_rename.c
index b89bc4c8fbd..8a9e6eb3dfd 100644
--- a/mysys/my_rename.c
+++ b/mysys/my_rename.c
@@ -25,7 +25,7 @@ int my_rename(const char *from, const char *to, myf MyFlags)
{
int error = 0;
DBUG_ENTER("my_rename");
- DBUG_PRINT("my",("from %s to %s MyFlags %d", from, to, MyFlags));
+ DBUG_PRINT("my",("from %s to %s MyFlags %lu", from, to, MyFlags));
#if defined(HAVE_RENAME)
#if defined(__WIN__)
diff --git a/mysys/my_seek.c b/mysys/my_seek.c
index 1033d7ac806..e15179a408f 100644
--- a/mysys/my_seek.c
+++ b/mysys/my_seek.c
@@ -48,7 +48,7 @@ my_off_t my_seek(File fd, my_off_t pos, int whence, myf MyFlags)
{
os_off_t newpos= -1;
DBUG_ENTER("my_seek");
- DBUG_PRINT("my",("fd: %d Pos: %llu Whence: %d MyFlags: %d",
+ DBUG_PRINT("my",("fd: %d Pos: %llu Whence: %d MyFlags: %lu",
fd, (ulonglong) pos, whence, MyFlags));
DBUG_ASSERT(pos != MY_FILEPOS_ERROR); /* safety check */
@@ -84,7 +84,7 @@ my_off_t my_tell(File fd, myf MyFlags)
{
os_off_t pos;
DBUG_ENTER("my_tell");
- DBUG_PRINT("my",("fd: %d MyFlags: %d",fd, MyFlags));
+ DBUG_PRINT("my",("fd: %d MyFlags: %lu",fd, MyFlags));
DBUG_ASSERT(fd >= 0);
#if defined (HAVE_TELL) && !defined (_WIN32)
pos= tell(fd);
diff --git a/mysys/my_sync.c b/mysys/my_sync.c
index 88bcb685271..c0afd587ada 100644
--- a/mysys/my_sync.c
+++ b/mysys/my_sync.c
@@ -65,16 +65,13 @@ int my_sync(File fd, myf my_flags)
{
int res;
DBUG_ENTER("my_sync");
-
- DBUG_PRINT("my",("fd: %d my_flags: %d", fd, my_flags));
+ DBUG_PRINT("my",("fd: %d my_flags: %lu", fd, my_flags));
if (my_disable_sync)
DBUG_RETURN(0);
statistic_increment(my_sync_count,&THR_LOCK_open);
- DBUG_PRINT("my",("Fd: %d my_flags: %d", fd, my_flags));
-
if (before_sync_wait)
(*before_sync_wait)();
@@ -158,7 +155,7 @@ int my_sync_dir(const char *dir_name __attribute__((unused)),
int res= 0;
const char *correct_dir_name;
DBUG_ENTER("my_sync_dir");
- DBUG_PRINT("my",("Dir: '%s' my_flags: %d", dir_name, my_flags));
+ DBUG_PRINT("my",("Dir: '%s' my_flags: %lu", dir_name, my_flags));
/* Sometimes the path does not contain an explicit directory */
correct_dir_name= (dir_name[0] == 0) ? cur_dir_name : dir_name;
/*
diff --git a/mysys/my_thr_init.c b/mysys/my_thr_init.c
index ac16189f3a7..56f7ca8b31a 100644
--- a/mysys/my_thr_init.c
+++ b/mysys/my_thr_init.c
@@ -427,7 +427,12 @@ struct st_my_thread_var *_my_thread_var(void)
my_thread_id my_thread_dbug_id()
{
- return my_thread_var->id;
+ /*
+ We need to do this test as some system threads may not yet have called
+ my_thread_init().
+ */
+ struct st_my_thread_var *tmp= my_thread_var;
+ return tmp ? tmp->id : 0;
}
#ifdef DBUG_OFF
diff --git a/mysys/my_write.c b/mysys/my_write.c
index c4cba7a927d..204e2e1488d 100644
--- a/mysys/my_write.c
+++ b/mysys/my_write.c
@@ -25,7 +25,7 @@ size_t my_write(File Filedes, const uchar *Buffer, size_t Count, myf MyFlags)
size_t writtenbytes, written;
uint errors;
DBUG_ENTER("my_write");
- DBUG_PRINT("my",("fd: %d Buffer: %p Count: %lu MyFlags: %d",
+ DBUG_PRINT("my",("fd: %d Buffer: %p Count: %lu MyFlags: %lu",
Filedes, Buffer, (ulong) Count, MyFlags));
errors= 0; written= 0;
if (!(MyFlags & (MY_WME | MY_FAE | MY_FNABP)))
diff --git a/mysys/mysys_priv.h b/mysys/mysys_priv.h
index f5d2f301837..61b7f2897d4 100644
--- a/mysys/mysys_priv.h
+++ b/mysys/mysys_priv.h
@@ -70,12 +70,13 @@ extern PSI_file_key key_file_charset, key_file_cnf;
#endif /* HAVE_PSI_INTERFACE */
#ifdef SAFEMALLOC
-void *sf_malloc(size_t size);
-void *sf_realloc(void *ptr, size_t size);
+void *sf_malloc(size_t size, myf my_flags);
+void *sf_realloc(void *ptr, size_t size, myf my_flags);
void sf_free(void *ptr);
+size_t sf_malloc_usable_size(void *ptr, my_bool *is_thread_specific);
#else
-#define sf_malloc(X) malloc(X)
-#define sf_realloc(X,Y) realloc(X,Y)
+#define sf_malloc(X,Y) malloc(X)
+#define sf_realloc(X,Y,Z) realloc(X,Y)
#define sf_free(X) free(X)
#endif
diff --git a/mysys/safemalloc.c b/mysys/safemalloc.c
index 31f0333725f..6283ed20cca 100644
--- a/mysys/safemalloc.c
+++ b/mysys/safemalloc.c
@@ -58,6 +58,8 @@ struct st_irem
#ifdef HAVE_BACKTRACE
void *frame[SF_REMEMBER_FRAMES]; /* call stack */
#endif
+ uint32 flags; /* Flags passed to malloc */
+ my_thread_id thread_id; /* Which thread did the allocation */
uint32 marker; /* Underrun marker value */
};
@@ -79,11 +81,21 @@ static int bad_ptr(const char *where, void *ptr);
static void free_memory(void *ptr);
static void sf_terminate();
+/* Set up the default callback that returns a thread id for an allocation */
+
+my_thread_id default_sf_malloc_dbug_id(void)
+{
+ return my_thread_dbug_id();
+}
+
+my_thread_id (*sf_malloc_dbug_id)(void)= default_sf_malloc_dbug_id;
+
+
/**
allocates memory
*/
-void *sf_malloc(size_t size)
+void *sf_malloc(size_t size, myf my_flags)
{
struct st_irem *irem;
uchar *data;
@@ -114,7 +126,9 @@ void *sf_malloc(size_t size)
data= (uchar*) (irem + 1);
irem->datasize= size;
irem->prev= 0;
+ irem->flags= my_flags;
irem->marker= MAGICSTART;
+ irem->thread_id= sf_malloc_dbug_id();
data[size + 0]= MAGICEND0;
data[size + 1]= MAGICEND1;
data[size + 2]= MAGICEND2;
@@ -154,17 +168,17 @@ void *sf_malloc(size_t size)
return data;
}
-void *sf_realloc(void *ptr, size_t size)
+void *sf_realloc(void *ptr, size_t size, myf my_flags)
{
char *data;
if (!ptr)
- return sf_malloc(size);
+ return sf_malloc(size, my_flags);
if (bad_ptr("Reallocating", ptr))
return 0;
- if ((data= sf_malloc(size)))
+ if ((data= sf_malloc(size, my_flags)))
{
struct st_irem *irem= (struct st_irem *)ptr - 1;
set_if_smaller(size, irem->datasize);
@@ -182,28 +196,25 @@ void sf_free(void *ptr)
free_memory(ptr);
}
-static void free_memory(void *ptr)
-{
- struct st_irem *irem= (struct st_irem *)ptr - 1;
-
- pthread_mutex_lock(&sf_mutex);
- /* Remove this structure from the linked list */
- if (irem->prev)
- irem->prev->next= irem->next;
- else
- sf_malloc_root= irem->next;
+/**
+ Return the size of a memory block and whether it is thread specific
- if (irem->next)
- irem->next->prev= irem->prev;
+ sf_malloc_usable_size()
+ @param ptr Pointer to malloced block
+ @param is_thread_specific Set to 1 if the block is marked as
+ MY_THREAD_SPECIFIC, otherwise to 0
- /* Handle the statistics */
- sf_malloc_count--;
- pthread_mutex_unlock(&sf_mutex);
+ @return Size of block
+*/
- /* only trash the data and magic values, but keep the stack trace */
- TRASH_FREE((uchar*)(irem + 1) - 4, irem->datasize + 8);
- free(irem);
- return;
+size_t sf_malloc_usable_size(void *ptr, my_bool *is_thread_specific)
+{
+ struct st_irem *irem= (struct st_irem *)ptr - 1;
+ DBUG_ENTER("sf_malloc_usable_size");
+ *is_thread_specific= test(irem->flags & MY_THREAD_SPECIFIC);
+ DBUG_PRINT("exit", ("size: %lu flags: %lu", (ulong) irem->datasize,
+ (ulong)irem->flags));
+ DBUG_RETURN(irem->datasize);
}
#ifdef HAVE_BACKTRACE
@@ -235,6 +246,39 @@ static void print_stack(void **frame)
#define print_stack(X) fprintf(stderr, "???\n")
#endif
+static void free_memory(void *ptr)
+{
+ struct st_irem *irem= (struct st_irem *)ptr - 1;
+
+ if ((irem->flags & MY_THREAD_SPECIFIC) && irem->thread_id &&
+ irem->thread_id != sf_malloc_dbug_id())
+ {
+ fprintf(stderr, "Warning: %4lu bytes freed by T@%lu, allocated by T@%lu at ",
+ (ulong) irem->datasize,
+ (ulong) sf_malloc_dbug_id(), (ulong) irem->thread_id);
+ print_stack(irem->frame);
+ }
+
+ pthread_mutex_lock(&sf_mutex);
+ /* Remove this structure from the linked list */
+ if (irem->prev)
+ irem->prev->next= irem->next;
+ else
+ sf_malloc_root= irem->next;
+
+ if (irem->next)
+ irem->next->prev= irem->prev;
+
+ /* Handle the statistics */
+ sf_malloc_count--;
+ pthread_mutex_unlock(&sf_mutex);
+
+ /* only trash the data and magic values, but keep the stack trace */
+ TRASH_FREE((uchar*)(irem + 1) - 4, irem->datasize + 8);
+ free(irem);
+ return;
+}
+
static void warn(const char *format,...)
{
va_list args;
@@ -312,9 +356,11 @@ static int sf_sanity()
/**
report on all the memory pieces that have not been free'd
+
+ @param id Id of thread to report. 0 if all
*/
-static void sf_terminate()
+void sf_report_leaked_memory(my_thread_id id)
{
size_t total= 0;
struct st_irem *irem;
@@ -322,21 +368,31 @@ static void sf_terminate()
sf_sanity();
/* Report on all the memory that was allocated but not free'd */
- if (!sf_leaking_memory && sf_malloc_root)
+
+ for (irem= sf_malloc_root; irem; irem= irem->next)
{
- for (irem= sf_malloc_root; irem; irem= irem->next)
+ if (!id || (irem->thread_id == id && (irem->flags & MY_THREAD_SPECIFIC)))
{
- fprintf(stderr, "Warning: %4lu bytes lost, allocated at ",
- (ulong) irem->datasize);
+ my_thread_id tid= (irem->thread_id && (irem->flags & MY_THREAD_SPECIFIC)) ?
+ irem->thread_id : 0;
+ fprintf(stderr, "Warning: %4lu bytes lost, allocated by T@%lu at ",
+ (ulong) irem->datasize, (ulong) tid);
print_stack(irem->frame);
total+= irem->datasize;
}
+ }
+ if (total)
fprintf(stderr, "Memory lost: %lu bytes in %d chunks\n",
(ulong) total, sf_malloc_count);
- }
+ return;
+}
+
+static void sf_terminate()
+{
+ if (!sf_leaking_memory)
+ sf_report_leaked_memory(0);
pthread_mutex_destroy(&sf_mutex);
- return;
}
#endif
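
Because each st_irem now records the allocating thread, leak reports can be scoped to a single thread. A hypothetical call site for the newly public sf_report_leaked_memory() (the hook function itself is invented for illustration):

#ifdef SAFEMALLOC
/* Report MY_THREAD_SPECIFIC blocks still owned by a finished thread;
   an id of 0 reports everything, as sf_terminate() does at process exit. */
static void report_thread_leaks(my_thread_id id)
{
  sf_report_leaked_memory(id);
}
#endif
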
diff --git a/mysys/string.c b/mysys/string.c
index d9791341c60..1263e7824f9 100644
--- a/mysys/string.c
+++ b/mysys/string.c
@@ -175,6 +175,36 @@ my_bool dynstr_append_os_quoted(DYNAMIC_STRING *str, const char *append, ...)
return ret;
}
+my_bool dynstr_append_quoted(DYNAMIC_STRING *str,
+ const char *append, size_t len,
+ char quote)
+{
+ uint additional= (str->alloc_increment ? str->alloc_increment : 10);
+ uint lim= additional;
+ uint i;
+ if (dynstr_realloc(str, len + additional + 2))
+ return TRUE;
+ str->str[str->length++]= quote;
+ for (i= 0; i < len; i++)
+ {
+ register char c= append[i];
+ if (c == quote || c == '\\')
+ {
+ if (!lim)
+ {
+ if (dynstr_realloc(str, additional))
+ return TRUE;
+ lim= additional;
+ }
+ lim--;
+ str->str[str->length++]= '\\';
+ }
+ str->str[str->length++]= c;
+ }
+ str->str[str->length++]= quote;
+ return FALSE;
+}
+
void dynstr_free(DYNAMIC_STRING *str)
{
@@ -193,3 +223,77 @@ void dynstr_reassociate(DYNAMIC_STRING *str, char **ptr, size_t *length,
*alloc_length= str->max_length;
str->str=0;
}
+
+
+/*
+ Copy a string from one character set to another
+
+ SYNOPSIS
+ copy_and_convert_extended()
+ to Store result here
+ to_cs Character set of result string
+ from Copy from here
+ from_length Length of from string
+ from_cs From character set
+ errors Store number of conversion errors here
+
+ NOTES
+ 'to' must be big enough to hold from_length * to_cs->mbmaxlen bytes
+
+ RETURN
+ number of bytes copied to 'to'
+*/
+
+uint32
+copy_and_convert_extended(char *to, uint32 to_length, CHARSET_INFO *to_cs,
+ const char *from, uint32 from_length,
+ CHARSET_INFO *from_cs,
+ uint *errors)
+{
+ int cnvres;
+ my_wc_t wc;
+ const uchar *from_end= (const uchar*) from+from_length;
+ char *to_start= to;
+ uchar *to_end= (uchar*) to+to_length;
+ my_charset_conv_mb_wc mb_wc= from_cs->cset->mb_wc;
+ my_charset_conv_wc_mb wc_mb= to_cs->cset->wc_mb;
+ uint error_count= 0;
+
+ while (1)
+ {
+ if ((cnvres= (*mb_wc)(from_cs, &wc, (uchar*) from,
+ from_end)) > 0)
+ from+= cnvres;
+ else if (cnvres == MY_CS_ILSEQ)
+ {
+ error_count++;
+ from++;
+ wc= '?';
+ }
+ else if (cnvres > MY_CS_TOOSMALL)
+ {
+ /*
+ A correct multibyte sequence was detected,
+ but it has no Unicode mapping.
+ */
+ error_count++;
+ from+= (-cnvres);
+ wc= '?';
+ }
+ else
+ break; // Not enough characters
+
+outp:
+ if ((cnvres= (*wc_mb)(to_cs, wc, (uchar*) to, to_end)) > 0)
+ to+= cnvres;
+ else if (cnvres == MY_CS_ILUNI && wc != '?')
+ {
+ error_count++;
+ wc= '?';
+ goto outp;
+ }
+ else
+ break;
+ }
+ *errors= error_count;
+ return (uint32) (to - to_start);
+}
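
A hypothetical caller of the new dynstr_append_quoted() helper; init_dynamic_string() and dynstr_free() are the existing mysys entry points:

#include <my_global.h>
#include <m_string.h>

static void quote_example(void)
{
  DYNAMIC_STRING s;
  if (init_dynamic_string(&s, NULL, 64, 16))
    return;                                 /* out of memory */
  /* Wraps the value in quotes, backslash-escaping the quote char and '\'. */
  dynstr_append_quoted(&s, "it's a \\test", 12, '\'');
  /* s.str now holds: 'it\'s a \\test' */
  dynstr_free(&s);
}
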
diff --git a/mysys/tree.c b/mysys/tree.c
index 85770194f25..fe2d3f45d57 100644
--- a/mysys/tree.c
+++ b/mysys/tree.c
@@ -84,8 +84,9 @@ static int test_rb_tree(TREE_ELEMENT *element);
#endif
void init_tree(TREE *tree, size_t default_alloc_size, size_t memory_limit,
- int size, qsort_cmp2 compare, my_bool with_delete,
- tree_element_free free_element, void *custom_arg)
+ int size, qsort_cmp2 compare,
+ tree_element_free free_element, void *custom_arg,
+ myf my_flags)
{
DBUG_ENTER("init_tree");
DBUG_PRINT("enter",("tree: 0x%lx size: %d", (long) tree, size));
@@ -104,6 +105,7 @@ void init_tree(TREE *tree, size_t default_alloc_size, size_t memory_limit,
tree->custom_arg = custom_arg;
tree->null_element.colour=BLACK;
tree->null_element.left=tree->null_element.right=0;
+ tree->my_flags= my_flags;
tree->flag= 0;
if (!free_element && size >= 0 &&
((uint) size <= sizeof(void*) || ((uint) size & (sizeof(void*)-1))))
@@ -125,10 +127,10 @@ void init_tree(TREE *tree, size_t default_alloc_size, size_t memory_limit,
tree->offset_to_key=0; /* use key through pointer */
tree->size_of_element+=sizeof(void*);
}
- if (!(tree->with_delete=with_delete))
+ if (!(tree->with_delete= test(my_flags & MY_TREE_WITH_DELETE)))
{
- init_alloc_root(&tree->mem_root, default_alloc_size, 0);
- tree->mem_root.min_malloc=(sizeof(TREE_ELEMENT)+tree->size_of_element);
+ init_alloc_root(&tree->mem_root, default_alloc_size, 0, MYF(my_flags));
+ tree->mem_root.min_malloc= sizeof(TREE_ELEMENT)+tree->size_of_element;
}
DBUG_VOID_RETURN;
}
@@ -236,7 +238,8 @@ TREE_ELEMENT *tree_insert(TREE *tree, void *key, uint key_size,
key_size+=tree->size_of_element;
if (tree->with_delete)
- element=(TREE_ELEMENT *) my_malloc(alloc_size, MYF(MY_WME));
+ element=(TREE_ELEMENT *) my_malloc(alloc_size,
+ MYF(tree->my_flags | MY_WME));
else
element=(TREE_ELEMENT *) alloc_root(&tree->mem_root,alloc_size);
if (!element)
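
Under the new init_tree() signature the old with_delete argument travels as the MY_TREE_WITH_DELETE bit in my_flags, alongside MY_THREAD_SPECIFIC. A hypothetical call, with a stub comparator for illustration:

static int cmp_ints(void *arg __attribute__((unused)),
                    const void *a, const void *b)
{
  return *(const int*) a - *(const int*) b;
}

/* Elements are my_malloc'ed per insert (with_delete) and tagged thread
   specific:
   init_tree(&tree, 0, 0, sizeof(int), cmp_ints, NULL, NULL,
             MYF(MY_TREE_WITH_DELETE | MY_THREAD_SPECIFIC));                */
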
diff --git a/mysys/waiting_threads.c b/mysys/waiting_threads.c
index 0a9474e68b4..21b2a9ac78a 100644
--- a/mysys/waiting_threads.c
+++ b/mysys/waiting_threads.c
@@ -403,7 +403,7 @@ static void wt_resource_init(uchar *arg)
bzero(rc, sizeof(*rc));
rc_rwlock_init(rc);
mysql_cond_init(key_WT_RESOURCE_cond, &rc->cond, 0);
- my_init_dynamic_array(&rc->owners, sizeof(WT_THD *), 0, 5);
+ my_init_dynamic_array(&rc->owners, sizeof(WT_THD *), 0, 5, MYF(0));
DBUG_VOID_RETURN;
}
@@ -509,7 +509,7 @@ void wt_thd_lazy_init(WT_THD *thd, const ulong *ds, const ulong *ts,
thd->deadlock_search_depth_long= dl;
thd->timeout_long= tl;
/* dynamic array is also initialized lazily - without memory allocations */
- my_init_dynamic_array(&thd->my_resources, sizeof(WT_RESOURCE *), 0, 5);
+ my_init_dynamic_array(&thd->my_resources, sizeof(WT_RESOURCE *), 0, 5, MYF(0));
#ifndef DBUG_OFF
thd->name= my_thread_name();
#endif
diff --git a/plugin/feedback/utils.cc b/plugin/feedback/utils.cc
index a841ac244d1..f7f962deaca 100644
--- a/plugin/feedback/utils.cc
+++ b/plugin/feedback/utils.cc
@@ -340,8 +340,10 @@ int prepare_linux_info()
*/
int fill_linux_info(THD *thd, TABLE_LIST *tables)
{
+#if defined(HAVE_SYS_UTSNAME_H) || defined(TARGET_OS_LINUX)
TABLE *table= tables->table;
CHARSET_INFO *cs= system_charset_info;
+#endif
#ifdef HAVE_SYS_UTSNAME_H
if (have_ubuf)
diff --git a/plugin/semisync/semisync_master.cc b/plugin/semisync/semisync_master.cc
index d1b982468d2..f8eb962b857 100644
--- a/plugin/semisync/semisync_master.cc
+++ b/plugin/semisync/semisync_master.cc
@@ -1051,6 +1051,7 @@ int ReplSemiSyncMaster::readSlaveReply(NET *net, uint32 server_id,
int result = -1;
struct timespec start_ts;
ulong trc_level = trace_level_;
LINT_INIT_STRUCT(start_ts);
diff --git a/plugin/semisync/semisync_master_plugin.cc b/plugin/semisync/semisync_master_plugin.cc
index b6ff23cd1ad..c811cb1cc9e 100644
--- a/plugin/semisync/semisync_master_plugin.cc
+++ b/plugin/semisync/semisync_master_plugin.cc
@@ -297,10 +297,10 @@ DEF_SHOW_FUNC(avg_trx_wait_time, SHOW_LONG)
static SHOW_VAR semi_sync_master_status_vars[]= {
{"Rpl_semi_sync_master_status",
(char*) &SHOW_FNAME(status),
- SHOW_FUNC},
+ SHOW_SIMPLE_FUNC},
{"Rpl_semi_sync_master_clients",
(char*) &SHOW_FNAME(clients),
- SHOW_FUNC},
+ SHOW_SIMPLE_FUNC},
{"Rpl_semi_sync_master_yes_tx",
(char*) &rpl_semi_sync_master_yes_transactions,
SHOW_LONG},
@@ -309,7 +309,7 @@ static SHOW_VAR semi_sync_master_status_vars[]= {
SHOW_LONG},
{"Rpl_semi_sync_master_wait_sessions",
(char*) &SHOW_FNAME(wait_sessions),
- SHOW_FUNC},
+ SHOW_SIMPLE_FUNC},
{"Rpl_semi_sync_master_no_times",
(char*) &rpl_semi_sync_master_off_times,
SHOW_LONG},
@@ -321,22 +321,22 @@ static SHOW_VAR semi_sync_master_status_vars[]= {
SHOW_LONG},
{"Rpl_semi_sync_master_tx_wait_time",
(char*) &SHOW_FNAME(trx_wait_time),
- SHOW_FUNC},
+ SHOW_SIMPLE_FUNC},
{"Rpl_semi_sync_master_tx_waits",
(char*) &SHOW_FNAME(trx_wait_num),
- SHOW_FUNC},
+ SHOW_SIMPLE_FUNC},
{"Rpl_semi_sync_master_tx_avg_wait_time",
(char*) &SHOW_FNAME(avg_trx_wait_time),
- SHOW_FUNC},
+ SHOW_SIMPLE_FUNC},
{"Rpl_semi_sync_master_net_wait_time",
(char*) &SHOW_FNAME(net_wait_time),
- SHOW_FUNC},
+ SHOW_SIMPLE_FUNC},
{"Rpl_semi_sync_master_net_waits",
(char*) &SHOW_FNAME(net_wait_num),
- SHOW_FUNC},
+ SHOW_SIMPLE_FUNC},
{"Rpl_semi_sync_master_net_avg_wait_time",
(char*) &SHOW_FNAME(avg_net_wait_time),
- SHOW_FUNC},
+ SHOW_SIMPLE_FUNC},
{NULL, NULL, SHOW_LONG},
};
diff --git a/scripts/mysql_system_tables.sql b/scripts/mysql_system_tables.sql
index 288f67dc45a..a55c5f60351 100644
--- a/scripts/mysql_system_tables.sql
+++ b/scripts/mysql_system_tables.sql
@@ -105,3 +105,9 @@ CREATE TABLE IF NOT EXISTS proxies_priv (Host char(60) binary DEFAULT '' NOT NUL
-- Remember for later if proxies_priv table already existed
set @had_proxies_priv_table= @@warning_count != 0;
+
+CREATE TABLE IF NOT EXISTS table_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, cardinality bigint(21) unsigned DEFAULT NULL, PRIMARY KEY (db_name,table_name) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Tables';
+
+CREATE TABLE IF NOT EXISTS column_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, column_name varchar(64) NOT NULL, min_value varchar(255) DEFAULT NULL, max_value varchar(255) DEFAULT NULL, nulls_ratio decimal(12,4) DEFAULT NULL, avg_length decimal(12,4) DEFAULT NULL, avg_frequency decimal(12,4) DEFAULT NULL, PRIMARY KEY (db_name,table_name,column_name) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Columns';
+
+CREATE TABLE IF NOT EXISTS index_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, index_name varchar(64) NOT NULL, prefix_arity int(11) unsigned NOT NULL, avg_frequency decimal(12,4) DEFAULT NULL, PRIMARY KEY (db_name,table_name,index_name,prefix_arity) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Indexes';
diff --git a/sql-common/client.c b/sql-common/client.c
index c8ec031c95c..96fc8cf71a2 100644
--- a/sql-common/client.c
+++ b/sql-common/client.c
@@ -891,7 +891,8 @@ void free_old_query(MYSQL *mysql)
DBUG_ENTER("free_old_query");
if (mysql->fields)
free_root(&mysql->field_alloc,MYF(0));
- init_alloc_root(&mysql->field_alloc,8192,0); /* Assume rowlength < 8192 */
+ /* Assume rowlength < 8192 */
+ init_alloc_root(&mysql->field_alloc, 8192, 0, MYF(MY_THREAD_SPECIFIC));
mysql->fields= 0;
mysql->field_count= 0; /* For API */
mysql->warning_count= 0;
@@ -1169,7 +1170,7 @@ static int add_init_command(struct st_mysql_options *options, const char *cmd)
{
options->init_commands= (DYNAMIC_ARRAY*)my_malloc(sizeof(DYNAMIC_ARRAY),
MYF(MY_WME));
- init_dynamic_array(options->init_commands,sizeof(char*),5,5);
+ my_init_dynamic_array(options->init_commands,sizeof(char*),5, 5, MYF(0));
}
if (!(tmp= my_strdup(cmd,MYF(MY_WME))) ||
@@ -1576,7 +1577,8 @@ MYSQL_DATA *cli_read_rows(MYSQL *mysql,MYSQL_FIELD *mysql_fields,
set_mysql_error(mysql, CR_OUT_OF_MEMORY, unknown_sqlstate);
DBUG_RETURN(0);
}
- init_alloc_root(&result->alloc,8192,0); /* Assume rowlength < 8192 */
+ /* Assume rowlength < 8192 */
+ init_alloc_root(&result->alloc, 8192, 0, MYF(MY_THREAD_SPECIFIC));
result->alloc.min_malloc=sizeof(MYSQL_ROWS);
prev_ptr= &result->data;
result->rows=0;
@@ -3205,7 +3207,7 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user,
*/
DBUG_PRINT("info",("IPV6 getaddrinfo error %d", gai_errno));
set_mysql_extended_error(mysql, CR_UNKNOWN_HOST, unknown_sqlstate,
- ER(CR_UNKNOWN_HOST), host, errno);
+ ER(CR_UNKNOWN_HOST), host, gai_errno);
goto error;
}
@@ -3290,7 +3292,7 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user,
if (mysql->options.extension && mysql->options.extension->async_context)
net->vio->async_context= mysql->options.extension->async_context;
- if (my_net_init(net, net->vio))
+ if (my_net_init(net, net->vio, MYF(0)))
{
vio_delete(net->vio);
net->vio = 0;
diff --git a/sql-common/client_plugin.c b/sql-common/client_plugin.c
index f31ddb22a6a..5b59c4e0e71 100644
--- a/sql-common/client_plugin.c
+++ b/sql-common/client_plugin.c
@@ -251,7 +251,7 @@ int mysql_client_plugin_init()
bzero(&mysql, sizeof(mysql)); /* dummy mysql for set_mysql_extended_error */
pthread_mutex_init(&LOCK_load_client_plugin, MY_MUTEX_INIT_SLOW);
- init_alloc_root(&mem_root, 128, 128);
+ init_alloc_root(&mem_root, 128, 128, MYF(0));
bzero(&plugin_list, sizeof(plugin_list));
diff --git a/sql-common/my_time.c b/sql-common/my_time.c
index df75bb49c89..fbcf52dbf19 100644
--- a/sql-common/my_time.c
+++ b/sql-common/my_time.c
@@ -104,6 +104,103 @@ my_bool check_date(const MYSQL_TIME *ltime, my_bool not_zero_date,
return FALSE;
}
+static int get_number(uint *val, uint *number_of_fields, const char **str,
+ const char *end)
+{
+ const char *s = *str;
+
+ if (s >= end)
+ return 0;
+
+ if (!my_isdigit(&my_charset_latin1, *s))
+ return 1;
+ *val= *s++ - '0';
+
+ for (; s < end && my_isdigit(&my_charset_latin1, *s); s++)
+ *val= *val * 10 + *s - '0';
+ *str = s;
+ (*number_of_fields)++;
+ return 0;
+}
+
+static int get_digits(uint *val, uint *number_of_fields, const char **str,
+ const char *end, uint length)
+{
+ return get_number(val, number_of_fields, str, min(end, *str + length));
+}
+
+static int get_punct(const char **str, const char *end)
+{
+ if (*str >= end)
+ return 0;
+ if (my_ispunct(&my_charset_latin1, **str))
+ {
+ (*str)++;
+ return 0;
+ }
+ return 1;
+}
+
+static int get_date_time_separator(uint *number_of_fields, ulonglong flags,
+ const char **str, const char *end)
+{
+ const char *s= *str;
+ if (s >= end)
+ return 0;
+
+ if (*s == 'T')
+ {
+ (*str)++;
+ return 0;
+ }
+
+ /*
+ now, this is tricky, for backward compatibility reasons.
+ cast("11:11:11.12.12.12" as datetime) should give 2011-11-11 12:12:12
+ but
+ cast("11:11:11.12.12.12" as time) should give 11:11:11.12
+ that is, a punctuation character can be accepted as a date/time separator
+ only if TIME_DATETIME_ONLY (see str_to_time) is not set.
+ */
+ if (my_ispunct(&my_charset_latin1, *s))
+ {
+ if (flags & TIME_DATETIME_ONLY)
+ {
+ /* See above; returning 1 is not enough, we need a hard abort here */
+ *number_of_fields= 0;
+ return 1;
+ }
+
+ (*str)++;
+ return 0;
+ }
+
+ if (!my_isspace(&my_charset_latin1, *s))
+ return 1;
+
+ do
+ {
+ s++;
+ } while (s < end && my_isspace(&my_charset_latin1, *s));
+ *str= s;
+ return 0;
+}
+
+static int get_maybe_T(const char **str, const char *end)
+{
+ if (*str < end && **str == 'T')
+ (*str)++;
+ return 0;
+}
+
+static uint skip_digits(const char **str, const char *end)
+{
+ const char *start= *str, *s= *str;
+ while (s < end && my_isdigit(&my_charset_latin1, *s))
+ s++;
+ *str= s;
+ return s - start;
+}
/*
Convert a timestamp string to a MYSQL_TIME value.
@@ -132,24 +229,9 @@ my_bool check_date(const MYSQL_TIME *ltime, my_bool not_zero_date,
The second part may have an optional .###### fraction part.
- NOTES
- This function should work with a format position vector as long as the
- following things holds:
- - All date are kept together and all time parts are kept together
- - Date and time parts must be separated by blank
- - Second fractions must come after second part and be separated
- by a '.'. (The second fractions are optional)
- - AM/PM must come after second fractions (or after seconds if no fractions)
- - Year must always been specified.
- - If time is before date, then we will use datetime format only if
- the argument consist of two parts, separated by space.
- Otherwise we will assume the argument is a date.
- - The hour part must be specified in hour-minute-second order.
-
RETURN VALUES
MYSQL_TIMESTAMP_NONE String wasn't a timestamp, like
[DD [HH:[MM:[SS]]]].fraction.
- l_time is not changed.
MYSQL_TIMESTAMP_DATE DATE string (YY MM and DD parts ok)
MYSQL_TIMESTAMP_DATETIME Full timestamp
MYSQL_TIMESTAMP_ERROR Timestamp with wrong values.
@@ -162,18 +244,10 @@ enum enum_mysql_timestamp_type
str_to_datetime(const char *str, uint length, MYSQL_TIME *l_time,
ulonglong flags, int *was_cut)
{
- uint UNINIT_VAR(field_length), UNINIT_VAR(year_length), digits, i, number_of_fields;
- uint date[MAX_DATE_PARTS], date_len[MAX_DATE_PARTS];
- uint add_hours= 0, start_loop;
- ulong not_zero_date, allow_space;
- my_bool is_internal_format;
- const char *pos, *UNINIT_VAR(last_field_pos);
- const char *end=str+length;
- const uchar *format_position;
- my_bool found_delimitier= 0, found_space= 0;
- uint frac_pos, frac_len;
+ const char *end=str+length, *pos;
+ uint number_of_fields= 0, digits, year_length, not_zero_date;
DBUG_ENTER("str_to_datetime");
- DBUG_PRINT("enter",("str: %.*s",length,str));
+ bzero(l_time, sizeof(*l_time));
if (flags & TIME_TIME_ONLY)
{
@@ -181,7 +255,6 @@ str_to_datetime(const char *str, uint length, MYSQL_TIME *l_time,
ret= str_to_time(str, length, l_time, flags, was_cut);
DBUG_RETURN(ret);
}
-
*was_cut= 0;
/* Skip space at start */
@@ -193,254 +266,93 @@ str_to_datetime(const char *str, uint length, MYSQL_TIME *l_time,
DBUG_RETURN(MYSQL_TIMESTAMP_NONE);
}
- is_internal_format= 0;
- /* This has to be changed if want to activate different timestamp formats */
- format_position= internal_format_positions;
-
/*
Calculate number of digits in first part.
If length= 8 or >= 14 then year is of format YYYY.
(YYYY-MM-DD, YYYYMMDD, YYYYMMDDHHMMSS)
*/
- for (pos=str;
- pos != end && (my_isdigit(&my_charset_latin1,*pos) || *pos == 'T');
- pos++)
- ;
+ pos= str;
+ digits= skip_digits(&pos, end);
- digits= (uint) (pos-str);
- start_loop= 0; /* Start of scan loop */
- date_len[format_position[0]]= 0; /* Length of year field */
- if (pos == end || *pos == '.')
+ if (pos < end && *pos == 'T') /* YYYYMMDDThhmmss is supported too */
{
- /* Found date in internal format (only numbers like YYYYMMDD) */
- year_length= (digits == 4 || digits == 8 || digits >= 14) ? 4 : 2;
- field_length= year_length;
- is_internal_format= 1;
- format_position= internal_format_positions;
+ pos++;
+ digits+= skip_digits(&pos, end);
}
- else
+ if (pos < end && *pos == '.' && digits >= 12) /* YYYYMMDDhhmmss.uuuuuu is supported too */
{
- if (format_position[0] >= 3) /* If year is after HHMMDD */
- {
- /*
- If year is not in first part then we have to determinate if we got
- a date field or a datetime field.
- We do this by checking if there is two numbers separated by
- space in the input.
- */
- while (pos < end && !my_isspace(&my_charset_latin1, *pos))
- pos++;
- while (pos < end && !my_isdigit(&my_charset_latin1, *pos))
- pos++;
- if (pos == end)
- {
- if (flags & TIME_DATETIME_ONLY)
- {
- *was_cut= 1;
- DBUG_RETURN(MYSQL_TIMESTAMP_NONE); /* Can't be a full datetime */
- }
- /* Date field. Set hour, minutes and seconds to 0 */
- date[0]= date[1]= date[2]= date[3]= date[4]= 0;
- start_loop= 5; /* Start with first date part */
- }
- }
-
- field_length= format_position[0] == 0 ? 4 : 2;
+ pos++;
+ skip_digits(&pos, end); // ignore the return value
}
- /*
- Only allow space in the first "part" of the datetime field and:
- - after days, part seconds
- - before and after AM/PM (handled by code later)
-
- 2003-03-03 20:00:20 AM
- 20:00:20.000000 AM 03-03-2000
- */
- i= max((uint) format_position[0], (uint) format_position[1]);
- set_if_bigger(i, (uint) format_position[2]);
- allow_space= ((1 << i) | (1 << format_position[6]));
- allow_space&= (1 | 2 | 4 | 8);
-
- not_zero_date= 0;
- for (i = start_loop;
- i < MAX_DATE_PARTS-1 && str != end &&
- my_isdigit(&my_charset_latin1,*str);
- i++)
+ if (pos == end)
{
- const char *start= str;
- ulong tmp_value= (uint) (uchar) (*str++ - '0');
-
/*
- Internal format means no delimiters; every field has a fixed
- width. Otherwise, we scan until we find a delimiter and discard
- leading zeroes -- except for the microsecond part, where leading
- zeroes are significant, and where we never process more than six
- digits.
+ Found date in internal format
+ (only numbers like [YY]YYMMDD[T][hhmmss[.uuuuuu]])
*/
- my_bool scan_until_delim= !is_internal_format &&
- ((i != format_position[6]));
-
- while (str != end && my_isdigit(&my_charset_latin1,str[0]) &&
- (scan_until_delim || --field_length))
- {
- tmp_value=tmp_value*10 + (ulong) (uchar) (*str - '0');
- str++;
- }
- date_len[i]= (uint) (str - start);
- if (tmp_value > 999999) /* Impossible date part */
- {
- *was_cut= 1;
- DBUG_RETURN(MYSQL_TIMESTAMP_NONE);
- }
- date[i]=tmp_value;
- not_zero_date|= tmp_value;
-
- /* Length of next field */
- field_length= format_position[i+1] == 0 ? 4 : 2;
-
- if ((last_field_pos= str) == end)
- {
- i++; /* Register last found part */
- break;
- }
- /* Allow a 'T' after day to allow CCYYMMDDT type of fields */
- if (i == format_position[2] && *str == 'T')
- {
- str++; /* ISO8601: CCYYMMDDThhmmss */
- continue;
- }
- if (i == format_position[5]) /* Seconds */
- {
- if (*str == '.') /* Followed by part seconds */
- {
- str++;
- field_length= 6; /* 6 digits */
- }
- continue;
- }
- while (str != end &&
- (my_ispunct(&my_charset_latin1,*str) ||
- my_isspace(&my_charset_latin1,*str)))
- {
- if (my_isspace(&my_charset_latin1,*str))
- {
- if (!(allow_space & (1 << i)))
- {
- *was_cut= 1;
- DBUG_RETURN(MYSQL_TIMESTAMP_NONE);
- }
- found_space= 1;
- }
- str++;
- found_delimitier= 1; /* Should be a 'normal' date */
- }
- /* Check if next position is AM/PM */
- if (i == format_position[6]) /* Seconds, time for AM/PM */
- {
- i++; /* Skip AM/PM part */
- if (format_position[7] != 255) /* If using AM/PM */
- {
- if (str+2 <= end && (str[1] == 'M' || str[1] == 'm'))
- {
- if (str[0] == 'p' || str[0] == 'P')
- add_hours= 12;
- else if (str[0] != 'a' && str[0] != 'A')
- continue; /* Not AM/PM */
- str+= 2; /* Skip AM/PM */
- /* Skip space after AM/PM */
- while (str != end && my_isspace(&my_charset_latin1,*str))
- str++;
- }
- }
- }
- last_field_pos= str;
+ year_length= (digits == 4 || digits == 8 || digits >= 14) ? 4 : 2;
+ *was_cut= get_digits(&l_time->year, &number_of_fields, &str, end, year_length)
+ || get_digits(&l_time->month, &number_of_fields, &str, end, 2)
+ || get_digits(&l_time->day, &number_of_fields, &str, end, 2)
+ || get_maybe_T(&str, end)
+ || get_digits(&l_time->hour, &number_of_fields, &str, end, 2)
+ || get_digits(&l_time->minute, &number_of_fields, &str, end, 2)
+ || get_digits(&l_time->second, &number_of_fields, &str, end, 2);
}
- if (found_delimitier && !found_space && (flags & TIME_DATETIME_ONLY))
+ else
{
- *was_cut= 1;
- DBUG_RETURN(MYSQL_TIMESTAMP_NONE); /* Can't be a datetime */
+ const char *start= str;
+ *was_cut = get_number(&l_time->year, &number_of_fields, &str, end);
+ year_length= str - start;
+
+ if (!*was_cut)
+ *was_cut= get_punct(&str, end)
+ || get_number(&l_time->month, &number_of_fields, &str, end)
+ || get_punct(&str, end)
+ || get_number(&l_time->day, &number_of_fields, &str, end)
+ || get_date_time_separator(&number_of_fields, flags, &str, end)
+ || get_number(&l_time->hour, &number_of_fields, &str, end)
+ || get_punct(&str, end)
+ || get_number(&l_time->minute, &number_of_fields, &str, end)
+ || get_punct(&str, end)
+ || get_number(&l_time->second, &number_of_fields, &str, end);
}
- str= last_field_pos;
+ if (number_of_fields < 3)
+ *was_cut= 1;
- number_of_fields= i - start_loop;
- while (i < MAX_DATE_PARTS)
- {
- date_len[i]= 0;
- date[i++]= 0;
- }
+ /* We're ok if the date part is correct, even if the rest is truncated */
+ if (*was_cut && number_of_fields < 3)
+ DBUG_RETURN(MYSQL_TIMESTAMP_NONE);
- if (!is_internal_format)
+ if (!*was_cut && str < end && *str == '.')
{
- year_length= date_len[(uint) format_position[0]];
- if (!year_length) /* Year must be specified */
- {
+ uint second_part;
+ const char *start= ++str;
+ *was_cut= get_digits(&second_part, &number_of_fields, &str, end, 6);
+ if (str - start < 6)
+ second_part*= log_10_int[6 - (str - start)];
+ l_time->second_part= second_part;
+ if (skip_digits(&str, end))
*was_cut= 1;
- DBUG_RETURN(MYSQL_TIMESTAMP_NONE);
- }
-
- l_time->year= date[(uint) format_position[0]];
- l_time->month= date[(uint) format_position[1]];
- l_time->day= date[(uint) format_position[2]];
- l_time->hour= date[(uint) format_position[3]];
- l_time->minute= date[(uint) format_position[4]];
- l_time->second= date[(uint) format_position[5]];
-
- frac_pos= (uint) format_position[6];
- frac_len= date_len[frac_pos];
- if (frac_len < 6)
- date[frac_pos]*= (uint) log_10_int[6 - frac_len];
- l_time->second_part= date[frac_pos];
-
- if (format_position[7] != (uchar) 255)
- {
- if (l_time->hour > 12)
- {
- *was_cut= 1;
- goto err;
- }
- l_time->hour= l_time->hour%12 + add_hours;
- }
- }
- else
- {
- l_time->year= date[0];
- l_time->month= date[1];
- l_time->day= date[2];
- l_time->hour= date[3];
- l_time->minute= date[4];
- l_time->second= date[5];
- if (date_len[6] < 6)
- date[6]*= (uint) log_10_int[6 - date_len[6]];
- l_time->second_part=date[6];
}
- l_time->neg= 0;
+
+ not_zero_date = l_time->year || l_time->month || l_time->day ||
+ l_time->hour || l_time->minute || l_time->second ||
+ l_time->second_part;
if (year_length == 2 && not_zero_date)
l_time->year+= (l_time->year < YY_PART_YEAR ? 2000 : 1900);
- if (number_of_fields < 3 ||
- l_time->year > 9999 || l_time->month > 12 ||
- l_time->day > 31 || l_time->hour > 23 ||
- l_time->minute > 59 || l_time->second > 59)
+ if (l_time->year > 9999 || l_time->month > 12 || l_time->day > 31 ||
+ l_time->hour > 23 || l_time->minute > 59 || l_time->second > 59)
{
- /* Only give warning for a zero date if there is some garbage after */
- if (!not_zero_date) /* If zero date */
- {
- for (; str != end ; str++)
- {
- if (!my_isspace(&my_charset_latin1, *str))
- {
- not_zero_date= 1; /* Give warning */
- break;
- }
- }
- }
- *was_cut= test(not_zero_date);
+ *was_cut= 1;
goto err;
}
- if (check_date(l_time, not_zero_date != 0, flags, was_cut))
+ if (check_date(l_time, not_zero_date, flags, was_cut))
goto err;
l_time->time_type= (number_of_fields <= 3 ?
@@ -495,16 +407,15 @@ str_to_time(const char *str, uint length, MYSQL_TIME *l_time,
ulong date[5];
ulonglong value;
const char *end=str+length, *end_of_days;
- my_bool found_days,found_hours;
- uint state;
+ my_bool found_days,found_hours, neg= 0;
+ uint UNINIT_VAR(state);
- l_time->neg=0;
*warning= 0;
for (; str != end && my_isspace(&my_charset_latin1,*str) ; str++)
length--;
if (str != end && *str == '-')
{
- l_time->neg=1;
+ neg=1;
str++;
length--;
}
@@ -527,6 +438,7 @@ str_to_time(const char *str, uint length, MYSQL_TIME *l_time,
}
}
+ l_time->neg= neg;
/* Not a timestamp. Try to get this as a DAYS_TO_SECOND string */
for (value=0; str != end && my_isdigit(&my_charset_latin1,*str) ; str++)
value=value*10L + (long) (*str - '0');
@@ -536,7 +448,6 @@ str_to_time(const char *str, uint length, MYSQL_TIME *l_time,
for (; str != end && my_isspace(&my_charset_latin1, str[0]) ; str++)
;
- LINT_INIT(state);
found_days=found_hours=0;
if ((uint) (end-str) > 1 && str != end_of_days &&
my_isdigit(&my_charset_latin1, *str))
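
The second_part scaling in the new parser is the one subtle step: a fraction such as ".12" must come out as 120000 microseconds, so the parsed value is multiplied by 10^(6 - digits_read). A worked sketch of that arithmetic, with a local power-of-ten table standing in for log_10_int:

static const unsigned long log_10_sketch[]=
{ 1, 10, 100, 1000, 10000, 100000, 1000000 };

static unsigned long pad_to_microseconds(unsigned long second_part,
                                         unsigned int digits_read)
{
  /* digits_read is how many digits followed the '.', at most 6 */
  if (digits_read < 6)
    second_part*= log_10_sketch[6 - digits_read];   /* ".12" -> 120000 */
  return second_part;
}
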
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index ad22446d0c9..4aa7b8f882e 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -39,6 +39,7 @@ ENDIF()
SET (SQL_SOURCE
../sql-common/client.c derror.cc des_key_file.cc
discover.cc ../libmysql/errmsg.c field.cc field_conv.cc
+ filesort_utils.cc
filesort.cc gstream.cc sha2.cc
signal_handler.cc
handler.cc hash_filo.h sql_plugin_services.h
@@ -62,7 +63,8 @@ SET (SQL_SOURCE
sql_list.cc sql_load.cc sql_manager.cc sql_parse.cc
sql_partition.cc sql_plugin.cc sql_prepare.cc sql_rename.cc
debug_sync.cc debug_sync.h
- sql_repl.cc sql_select.cc sql_show.cc sql_state.c sql_string.cc
+ sql_repl.cc sql_select.cc sql_show.cc sql_state.c
+ sql_statistics.cc sql_string.cc
sql_table.cc sql_test.cc sql_trigger.cc sql_udf.cc sql_union.cc
sql_update.cc sql_view.cc strfunc.cc table.cc thr_malloc.cc
sql_time.cc tztime.cc uniques.cc unireg.cc item_xmlfunc.cc
@@ -85,6 +87,7 @@ SET (SQL_SOURCE
gcalc_slicescan.cc gcalc_tools.cc
threadpool_common.cc
../sql-common/mysql_async.c
+ my_apc.cc my_apc.h
${GEN_SOURCES}
${MYSYS_LIBWRAP_SOURCE}
)
diff --git a/sql/bounded_queue.h b/sql/bounded_queue.h
new file mode 100644
index 00000000000..2d4e6cff96d
--- /dev/null
+++ b/sql/bounded_queue.h
@@ -0,0 +1,195 @@
+/* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef BOUNDED_QUEUE_INCLUDED
+#define BOUNDED_QUEUE_INCLUDED
+
+#include <string.h>
+#include "my_global.h"
+#include "my_base.h"
+#include "my_sys.h"
+#include "queues.h"
+
+class Sort_param;
+
+/**
+ A priority queue with a fixed, limited size.
+
+ This is a wrapper on top of QUEUE and the queue_xxx() functions.
+ It keeps the top-N elements which are inserted.
+
+ Elements of type Element_type are pushed into the queue.
+ For each element, we call a user-supplied keymaker_function,
+ to generate a key of type Key_type for the element.
+ Instances of Key_type are compared with the user-supplied compare_function.
+
+ The underlying QUEUE implementation needs one extra element for replacing
+ the lowest/highest element when pushing into a full queue.
+ */
+template<typename Element_type, typename Key_type>
+class Bounded_queue
+{
+public:
+ Bounded_queue()
+ {
+ memset(&m_queue, 0, sizeof(m_queue));
+ }
+
+ ~Bounded_queue()
+ {
+ delete_queue(&m_queue);
+ }
+
+ /**
+ Function for making sort-key from input data.
+ @param param Sort parameters.
+ @param to Where to put the key.
+ @param from The input data.
+ */
+ typedef void (*keymaker_function)(Sort_param *param,
+ Key_type *to,
+ Element_type *from);
+
+ /**
+ Function for comparing two keys.
+ @param n Pointer to number of bytes to compare.
+ @param a First key.
+ @param b Second key.
+ @retval -1, 0, or 1 depending on whether the left argument is
+ less than, equal to, or greater than the right argument.
+ */
+ typedef int (*compare_function)(size_t *n, Key_type **a, Key_type **b);
+
+ /**
+ Initialize the queue.
+
+ @param max_elements The size of the queue.
+ @param max_at_top Set to true if you want biggest element on top.
+ false: We keep the n largest elements.
+ pop() will return the smallest key in the result set.
+ true: We keep the n smallest elements.
+ pop() will return the largest key in the result set.
+ @param compare Compare function for elements, takes 3 arguments.
+ If NULL, we use get_ptr_compare(compare_length).
+ @param compare_length Length of the data (i.e. the keys) used for sorting.
+ @param keymaker Function which generates keys for elements.
+ @param sort_param Sort parameters.
+ @param sort_keys Array of pointers to keys to sort.
+
+    @retval 0 OK.
+    @retval 1 Could not allocate memory.
+
+ We do *not* take ownership of any of the input pointer arguments.
+ */
+ int init(ha_rows max_elements, bool max_at_top,
+ compare_function compare, size_t compare_length,
+ keymaker_function keymaker, Sort_param *sort_param,
+ Key_type **sort_keys);
+
+ /**
+    Pushes an element onto the queue.
+ If the queue is already full, we discard one element.
+ Calls keymaker_function to generate a key for the element.
+
+ @param element The element to be pushed.
+ */
+ void push(Element_type *element);
+
+ /**
+ Removes the top element from the queue.
+
+ @retval Pointer to the (key of the) removed element.
+
+    @note This function is for unit testing, where we push elements into the
+ queue, and test that the appropriate keys are retained.
+ Interleaving of push() and pop() operations has not been tested.
+ */
+ Key_type **pop()
+ {
+ // Don't return the extra element to the client code.
+ if (queue_is_full((&m_queue)))
+ queue_remove(&m_queue, 0);
+ DBUG_ASSERT(m_queue.elements > 0);
+ if (m_queue.elements == 0)
+ return NULL;
+ return reinterpret_cast<Key_type**>(queue_remove(&m_queue, 0));
+ }
+
+ /**
+ The number of elements in the queue.
+ */
+ uint num_elements() const { return m_queue.elements; }
+
+ /**
+ Is the queue initialized?
+ */
+ bool is_initialized() const { return m_queue.max_elements > 0; }
+
+private:
+ Key_type **m_sort_keys;
+ size_t m_compare_length;
+ keymaker_function m_keymaker;
+ Sort_param *m_sort_param;
+ st_queue m_queue;
+};
+
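+/*
+  Usage sketch (illustrative only, not part of this patch): how a caller
+  such as filesort() might drive the queue. The names limit, key_len,
+  make_sortkey_fn, sort_param, keys and read_next_record() are
+  hypothetical placeholders.
+
+    Bounded_queue<uchar, uchar> pq;
+    if (pq.init(limit, true,
+                NULL,                  // default: get_ptr_compare(key_len)
+                key_len, make_sortkey_fn, &sort_param, keys))
+      return 1;                        // too many rows or out of memory
+    while (uchar *rec= read_next_record())
+      pq.push(rec);                    // keeps keys for the top "limit" rows
+*/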
+
+template<typename Element_type, typename Key_type>
+int Bounded_queue<Element_type, Key_type>::init(ha_rows max_elements,
+ bool max_at_top,
+ compare_function compare,
+ size_t compare_length,
+ keymaker_function keymaker,
+ Sort_param *sort_param,
+ Key_type **sort_keys)
+{
+ DBUG_ASSERT(sort_keys != NULL);
+
+ m_sort_keys= sort_keys;
+ m_compare_length= compare_length;
+ m_keymaker= keymaker;
+ m_sort_param= sort_param;
+  // init_queue() takes a uint, and also does (max_elements + 1)
+ if (max_elements >= (UINT_MAX - 1))
+ return 1;
+ if (compare == NULL)
+ compare=
+ reinterpret_cast<compare_function>(get_ptr_compare(compare_length));
+  // We allocate space for one extra element, for replacing the top element
+  // when pushing into a full queue.
+ return init_queue(&m_queue, (uint) max_elements + 1,
+ 0, max_at_top,
+ reinterpret_cast<queue_compare>(compare),
+ &m_compare_length, 0, 0);
+}
+
+
+template<typename Element_type, typename Key_type>
+void Bounded_queue<Element_type, Key_type>::push(Element_type *element)
+{
+ DBUG_ASSERT(is_initialized());
+ if (queue_is_full((&m_queue)))
+ {
+ // Replace top element with new key, and re-order the queue.
+ Key_type **pq_top= reinterpret_cast<Key_type **>(queue_top(&m_queue));
+ (*m_keymaker)(m_sort_param, *pq_top, element);
+ queue_replace_top(&m_queue);
+ } else {
+ // Insert new key into the queue.
+ (*m_keymaker)(m_sort_param, m_sort_keys[m_queue.elements], element);
+ queue_insert(&m_queue,
+ reinterpret_cast<uchar*>(&m_sort_keys[m_queue.elements]));
+ }
+}
+
+#endif // BOUNDED_QUEUE_INCLUDED
diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc
index 4097d7fe6e1..28d1109a0b4 100644
--- a/sql/debug_sync.cc
+++ b/sql/debug_sync.cc
@@ -239,7 +239,8 @@ void debug_sync_init_thread(THD *thd)
if (opt_debug_sync_timeout)
{
thd->debug_sync_control= (st_debug_sync_control*)
- my_malloc(sizeof(st_debug_sync_control), MYF(MY_WME | MY_ZEROFILL));
+ my_malloc(sizeof(st_debug_sync_control),
+ MYF(MY_WME | MY_ZEROFILL | MY_THREAD_SPECIFIC));
if (!thd->debug_sync_control)
{
/*
@@ -984,6 +985,7 @@ static bool debug_sync_eval_action(THD *thd, char *action_str)
DBUG_ENTER("debug_sync_eval_action");
DBUG_ASSERT(thd);
DBUG_ASSERT(action_str);
+ DBUG_PRINT("debug_sync", ("action_str='%s'", action_str));
/*
Get debug sync point name. Or a special command.
diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc
index c41194d1a8d..14b1003d308 100644
--- a/sql/event_data_objects.cc
+++ b/sql/event_data_objects.cc
@@ -209,7 +209,7 @@ Event_basic::Event_basic()
{
DBUG_ENTER("Event_basic::Event_basic");
/* init memory root */
- init_sql_alloc(&mem_root, 256, 512);
+ init_sql_alloc(&mem_root, 256, 512, MYF(0));
dbname.str= name.str= NULL;
dbname.length= name.length= 0;
time_zone= NULL;
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc
index 14927a66915..b7e82d8b4b9 100644
--- a/sql/event_db_repository.cc
+++ b/sql/event_db_repository.cc
@@ -835,9 +835,6 @@ Event_db_repository::update_event(THD *thd, Event_parse_data *parse_data,
(int) table->field[ET_FIELD_ON_COMPLETION]->val_int()))
goto end;
- /* Don't update create on row update. */
- table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
-
/*
mysql_event_fill_row() calls my_error() in case of error so no need to
handle it here
@@ -1118,17 +1115,15 @@ update_timing_fields_for_event(THD *thd,
TABLE *table= NULL;
Field **fields;
int ret= 1;
- bool save_binlog_row_based;
+ enum_binlog_format save_binlog_format;
MYSQL_TIME time;
-
DBUG_ENTER("Event_db_repository::update_timing_fields_for_event");
/*
Turn off row binlogging of event timing updates. These are not used
for RBR of events replicated to the slave.
*/
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
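+  /*
+    set_current_stmt_binlog_format_stmt() returns the previous format so
+    that restore_stmt_binlog_format() below can put it back; this replaces
+    the old save/clear/restore dance on a bool flag.
+  */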
+ save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
DBUG_ASSERT(thd->security_ctx->master_access & SUPER_ACL);
@@ -1141,8 +1136,6 @@ update_timing_fields_for_event(THD *thd,
goto end;
store_record(table, record[1]);
- /* Don't update create on row update. */
- table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
my_tz_OFFSET0->gmt_sec_to_TIME(&time, last_executed);
fields[ET_FIELD_LAST_EXECUTED]->set_notnull();
@@ -1163,10 +1156,7 @@ end:
if (table)
close_mysql_tables(thd);
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
+ thd->restore_stmt_binlog_format(save_binlog_format);
DBUG_RETURN(test(ret));
}
diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc
index b41c9e2cda0..8c3e57778ab 100644
--- a/sql/event_scheduler.cc
+++ b/sql/event_scheduler.cc
@@ -153,8 +153,6 @@ void
deinit_event_thread(THD *thd)
{
thd->proc_info= "Clearing";
- DBUG_ASSERT(thd->net.buff != 0);
- net_end(&thd->net);
DBUG_PRINT("exit", ("Event thread finishing"));
mysql_mutex_lock(&LOCK_thread_count);
thread_count--;
@@ -182,15 +180,17 @@ deinit_event_thread(THD *thd)
void
pre_init_event_thread(THD* thd)
{
+ THD *orig_thd= current_thd;
DBUG_ENTER("pre_init_event_thread");
+
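+  /*
+    Make the new THD current while it is being initialized, presumably so
+    that MY_THREAD_SPECIFIC allocations (e.g. in my_net_init() below) are
+    accounted to it; orig_thd is restored at the end of this function.
+  */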
+ set_current_thd(thd);
thd->client_capabilities= 0;
thd->security_ctx->master_access= 0;
thd->security_ctx->db_access= 0;
thd->security_ctx->host_or_ip= (char*)my_localhost;
- my_net_init(&thd->net, NULL);
+ my_net_init(&thd->net, NULL, MYF(MY_THREAD_SPECIFIC));
thd->security_ctx->set_user((char*)"event_scheduler");
thd->net.read_timeout= slave_net_timeout;
- thd->slave_thread= 0;
thd->variables.option_bits|= OPTION_AUTO_IS_NULL;
thd->client_capabilities|= CLIENT_MULTI_RESULTS;
mysql_mutex_lock(&LOCK_thread_count);
@@ -208,6 +208,7 @@ pre_init_event_thread(THD* thd)
/* Do not use user-supplied timeout value for system threads. */
thd->variables.lock_wait_timeout= LONG_TIMEOUT;
+ set_current_thd(orig_thd);
DBUG_VOID_RETURN;
}
@@ -403,6 +404,7 @@ Event_scheduler::start()
ret= TRUE;
goto end;
}
+
pre_init_event_thread(new_thd);
new_thd->system_thread= SYSTEM_THREAD_EVENT_SCHEDULER;
new_thd->command= COM_DAEMON;
@@ -414,6 +416,7 @@ Event_scheduler::start()
*/
new_thd->security_ctx->master_access |= SUPER_ACL;
+ /* This should not be marked with MY_THREAD_SPECIFIC */
scheduler_param_value=
(struct scheduler_param *)my_malloc(sizeof(struct scheduler_param), MYF(0));
scheduler_param_value->thd= new_thd;
@@ -433,8 +436,6 @@ Event_scheduler::start()
ret= TRUE;
new_thd->proc_info= "Clearing";
- DBUG_ASSERT(new_thd->net.buff != 0);
- net_end(&new_thd->net);
mysql_mutex_lock(&LOCK_thread_count);
thread_count--;
dec_thread_running();
@@ -534,6 +535,7 @@ Event_scheduler::execute_top(Event_queue_element_for_exec *event_name)
pthread_t th;
int res= 0;
DBUG_ENTER("Event_scheduler::execute_top");
+
if (!(new_thd= new THD()))
goto error;
@@ -568,8 +570,6 @@ error:
if (new_thd)
{
new_thd->proc_info= "Clearing";
- DBUG_ASSERT(new_thd->net.buff != 0);
- net_end(&new_thd->net);
mysql_mutex_lock(&LOCK_thread_count);
thread_count--;
dec_thread_running();
diff --git a/sql/events.cc b/sql/events.cc
index 8b4bab9e3a6..46b13056d7d 100644
--- a/sql/events.cc
+++ b/sql/events.cc
@@ -306,7 +306,8 @@ Events::create_event(THD *thd, Event_parse_data *parse_data,
bool if_not_exists)
{
bool ret;
- bool save_binlog_row_based, event_already_exists;
+ bool event_already_exists;
+ enum_binlog_format save_binlog_format;
DBUG_ENTER("Events::create_event");
if (check_if_system_tables_error())
@@ -338,8 +339,7 @@ Events::create_event(THD *thd, Event_parse_data *parse_data,
Turn off row binlogging of this statement and use statement-based
so that all supporting tables are updated for CREATE EVENT command.
*/
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
+ save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
if (lock_object_name(thd, MDL_key::EVENT,
parse_data->dbname.str, parse_data->name.str))
@@ -398,10 +398,8 @@ Events::create_event(THD *thd, Event_parse_data *parse_data,
}
}
}
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
+
+ thd->restore_stmt_binlog_format(save_binlog_format);
DBUG_RETURN(ret);
}
@@ -431,7 +429,7 @@ Events::update_event(THD *thd, Event_parse_data *parse_data,
LEX_STRING *new_dbname, LEX_STRING *new_name)
{
int ret;
- bool save_binlog_row_based;
+ enum_binlog_format save_binlog_format;
Event_queue_element *new_element;
DBUG_ENTER("Events::update_event");
@@ -478,8 +476,7 @@ Events::update_event(THD *thd, Event_parse_data *parse_data,
Turn off row binlogging of this statement and use statement-based
so that all supporting tables are updated for UPDATE EVENT command.
*/
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
+ save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
if (lock_object_name(thd, MDL_key::EVENT,
parse_data->dbname.str, parse_data->name.str))
@@ -513,11 +510,8 @@ Events::update_event(THD *thd, Event_parse_data *parse_data,
ret= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
}
}
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
+ thd->restore_stmt_binlog_format(save_binlog_format);
DBUG_RETURN(ret);
}
@@ -550,7 +544,7 @@ bool
Events::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name, bool if_exists)
{
int ret;
- bool save_binlog_row_based;
+ enum_binlog_format save_binlog_format;
DBUG_ENTER("Events::drop_event");
if (check_if_system_tables_error())
@@ -563,8 +557,7 @@ Events::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name, bool if_exists)
Turn off row binlogging of this statement and use statement-based so
that all supporting tables are updated for DROP EVENT command.
*/
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
+ save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
if (lock_object_name(thd, MDL_key::EVENT,
dbname.str, name.str))
@@ -578,10 +571,8 @@ Events::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name, bool if_exists)
DBUG_ASSERT(thd->query() && thd->query_length());
ret= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
}
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
+
+ thd->restore_stmt_binlog_format(save_binlog_format);
DBUG_RETURN(ret);
}
@@ -888,7 +879,7 @@ end:
}
delete thd;
/* Remember that we don't have a THD */
- my_pthread_setspecific_ptr(THR_THD, NULL);
+ set_current_thd(0);
DBUG_RETURN(res);
}
diff --git a/sql/field.cc b/sql/field.cc
index 4667702c145..d4468ba3c5b 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -50,11 +50,6 @@
 Instantiate templates and static variables
*****************************************************************************/
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-template class List<Create_field>;
-template class List_iterator<Create_field>;
-#endif
-
static const char *zero_timestamp="0000-00-00 00:00:00.000000";
/* number of bytes to store second_part part of the TIMESTAMP(N) */
@@ -76,7 +71,7 @@ const char field_separator=',';
((ulong) ((LL(1) << min(arg, 4) * 8) - LL(1)))
#define ASSERT_COLUMN_MARKED_FOR_READ DBUG_ASSERT(!table || (!table->read_set || bitmap_is_set(table->read_set, field_index)))
-#define ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED DBUG_ASSERT(!table || (!table->write_set || bitmap_is_set(table->write_set, field_index) || bitmap_is_set(table->vcol_set, field_index)))
+#define ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED DBUG_ASSERT(is_stat_field || !table || (!table->write_set || bitmap_is_set(table->write_set, field_index) || bitmap_is_set(table->vcol_set, field_index)))
#define FLAGSTR(S,F) ((S) & (F) ? #F " " : "")
@@ -1195,11 +1190,11 @@ int Field_num::check_int(CHARSET_INFO *cs, const char *str, int length,
if (str == int_end || error == MY_ERRNO_EDOM)
{
ErrConvString err(str, length, cs);
- push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(get_thd(), MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
"integer", err.ptr(), field_name,
- (ulong) table->in_use->warning_info->current_row_for_warning());
+ (ulong) get_thd()->warning_info->current_row_for_warning());
return 1;
}
/* Test if we have garbage at the end of the given string. */
@@ -1268,7 +1263,7 @@ bool Field_num::get_int(CHARSET_INFO *cs, const char *from, uint len,
goto out_of_range;
}
}
- if (table->in_use->count_cuted_fields &&
+ if (get_thd()->count_cuted_fields &&
check_int(cs, from, len, end, error))
return 1;
return 0;
@@ -1339,13 +1334,16 @@ Field::Field(uchar *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
option_struct(0), key_start(0), part_of_key(0),
part_of_key_not_clustered(0), part_of_sortkey(0),
unireg_check(unireg_check_arg), field_length(length_arg),
- null_bit(null_bit_arg), is_created_from_null_item(FALSE), vcol_info(0),
+ null_bit(null_bit_arg), is_created_from_null_item(FALSE),
+ read_stats(NULL), collected_stats(0),
+ vcol_info(0),
stored_in_db(TRUE)
{
flags=null_ptr ? 0: NOT_NULL_FLAG;
comment.str= (char*) "";
comment.length=0;
- field_index= 0;
+ field_index= 0;
+ is_stat_field= FALSE;
}
@@ -1445,10 +1443,11 @@ int Field::store(const char *to, uint length, CHARSET_INFO *cs,
enum_check_fields check_level)
{
int res;
- enum_check_fields old_check_level= table->in_use->count_cuted_fields;
- table->in_use->count_cuted_fields= check_level;
+ THD *thd= get_thd();
+ enum_check_fields old_check_level= thd->count_cuted_fields;
+ thd->count_cuted_fields= check_level;
res= store(to, length, cs);
- table->in_use->count_cuted_fields= old_check_level;
+ thd->count_cuted_fields= old_check_level;
return res;
}
@@ -1832,6 +1831,10 @@ Field *Field::new_field(MEM_ROOT *root, TABLE *new_table,
tmp->key_start.init(0);
tmp->part_of_key.init(0);
tmp->part_of_sortkey.init(0);
+ /*
+ TODO: it is not clear why this method needs to reset unireg_check.
+ Try not to reset it, or explain why it needs to be reset.
+ */
tmp->unireg_check= Field::NONE;
tmp->flags&= (NOT_NULL_FLAG | BLOB_FLAG | UNSIGNED_FLAG |
ZEROFILL_FLAG | BINARY_FLAG | ENUM_FLAG | SET_FLAG);
@@ -1870,6 +1873,32 @@ Field *Field::clone(MEM_ROOT *root, TABLE *new_table)
}
+
+Field *Field::clone(MEM_ROOT *root, TABLE *new_table, my_ptrdiff_t diff,
+ bool stat_flag)
+{
+ Field *tmp;
+ if ((tmp= (Field*) memdup_root(root,(char*) this,size_of())))
+ {
+    tmp->init(new_table);
+    tmp->move_field_offset(diff);
+    /* Set inside the if(): tmp is NULL when memdup_root() fails */
+    tmp->is_stat_field= stat_flag;
+  }
+ return tmp;
+}
+
+
+Field *Field::clone(MEM_ROOT *root, my_ptrdiff_t diff)
+{
+ Field *tmp;
+ if ((tmp= (Field*) memdup_root(root,(char*) this,size_of())))
+ {
+ tmp->move_field_offset(diff);
+ }
+ return tmp;
+}
+
+
/****************************************************************************
Field_null, a field that always return NULL
****************************************************************************/
@@ -1984,7 +2013,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
uchar *left_wall,*right_wall;
uchar tmp_char;
/*
- To remember if table->in_use->cuted_fields has already been incremented,
+ To remember if get_thd()->cuted_fields has already been incremented,
to do that only once
*/
bool is_cuted_fields_incr=0;
@@ -2075,7 +2104,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
 it makes the code easier to read.
*/
- if (table->in_use->count_cuted_fields)
+ if (get_thd()->count_cuted_fields)
{
// Skip end spaces
for (;from != end && my_isspace(&my_charset_bin, *from); from++) ;
@@ -2227,7 +2256,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
/*
Write digits of the frac_% parts ;
- Depending on table->in_use->count_cutted_fields, we may also want
+    Depending on get_thd()->count_cuted_fields, we may also want
to know if some non-zero tail of these parts will
be truncated (for example, 0.002->0.00 will generate a warning,
while 0.000->0.00 will not)
@@ -2245,7 +2274,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
{
if (pos == right_wall)
{
- if (table->in_use->count_cuted_fields && !is_cuted_fields_incr)
+ if (get_thd()->count_cuted_fields && !is_cuted_fields_incr)
break; // Go on below to see if we lose non zero digits
return 0;
}
@@ -2666,20 +2695,21 @@ int Field_new_decimal::store(const char *from, uint length,
ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int err;
my_decimal decimal_value;
+ THD *thd= get_thd();
DBUG_ENTER("Field_new_decimal::store(char*)");
if ((err= str2my_decimal(E_DEC_FATAL_ERROR &
~(E_DEC_OVERFLOW | E_DEC_BAD_NUM),
from, length, charset_arg,
&decimal_value)) &&
- table->in_use->abort_on_warning)
+ thd->abort_on_warning)
{
ErrConvString errmsg(from, length, &my_charset_bin);
- push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
"decimal", errmsg.ptr(), field_name,
- (ulong) table->in_use->warning_info->current_row_for_warning());
+ (ulong) thd->warning_info->current_row_for_warning());
DBUG_RETURN(err);
}
@@ -2695,11 +2725,11 @@ int Field_new_decimal::store(const char *from, uint length,
case E_DEC_BAD_NUM:
{
ErrConvString errmsg(from, length, &my_charset_bin);
- push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
"decimal", errmsg.ptr(), field_name,
- (ulong) table->in_use->warning_info->
+ (ulong) thd->warning_info->
current_row_for_warning());
my_decimal_set_zero(&decimal_value);
break;
@@ -2727,6 +2757,7 @@ int Field_new_decimal::store(double nr)
ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
my_decimal decimal_value;
int err;
+ THD *thd= get_thd();
DBUG_ENTER("Field_new_decimal::store(double)");
err= double2my_decimal(E_DEC_FATAL_ERROR & ~E_DEC_OVERFLOW, nr,
@@ -2736,11 +2767,11 @@ int Field_new_decimal::store(double nr)
if (check_overflow(err))
set_value_on_overflow(&decimal_value, decimal_value.sign());
 /* Only issue a warning if store_value doesn't issue a warning */
- table->in_use->got_warning= 0;
+ thd->got_warning= 0;
}
if (store_value(&decimal_value))
err= 1;
- else if (err && !table->in_use->got_warning)
+ else if (err && !thd->got_warning)
err= warn_if_overflow(err);
DBUG_RETURN(err);
}
@@ -2758,11 +2789,11 @@ int Field_new_decimal::store(longlong nr, bool unsigned_val)
if (check_overflow(err))
set_value_on_overflow(&decimal_value, decimal_value.sign());
 /* Only issue a warning if store_value doesn't issue a warning */
- table->in_use->got_warning= 0;
+ get_thd()->got_warning= 0;
}
if (store_value(&decimal_value))
err= 1;
- else if (err && !table->in_use->got_warning)
+ else if (err && !get_thd()->got_warning)
err= warn_if_overflow(err);
return err;
}
@@ -3658,7 +3689,7 @@ longlong Field_long::val_int(void)
ASSERT_COLUMN_MARKED_FOR_READ;
int32 j;
/* See the comment in Field_long::store(long long) */
- DBUG_ASSERT(table->in_use == current_thd);
+ DBUG_ASSERT(!table || table->in_use == current_thd);
j=sint4korr(ptr);
return unsigned_flag ? (longlong) (uint32) j : (longlong) j;
}
@@ -3740,7 +3771,7 @@ int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs)
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
- else if (table->in_use->count_cuted_fields &&
+ else if (get_thd()->count_cuted_fields &&
check_int(cs, from, len, end, error))
error= 1;
else
@@ -3892,7 +3923,7 @@ int Field_float::store(const char *from,uint len,CHARSET_INFO *cs)
char *end;
double nr= my_strntod(cs,(char*) from,len,&end,&error);
if (error || (!len || ((uint) (end-from) != len &&
- table->in_use->count_cuted_fields)))
+ get_thd()->count_cuted_fields)))
{
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
(error ? ER_WARN_DATA_OUT_OF_RANGE : WARN_DATA_TRUNCATED), 1);
@@ -4080,7 +4111,7 @@ int Field_double::store(const char *from,uint len,CHARSET_INFO *cs)
char *end;
double nr= my_strntod(cs,(char*) from, len, &end, &error);
if (error || (!len || ((uint) (end-from) != len &&
- table->in_use->count_cuted_fields)))
+ get_thd()->count_cuted_fields)))
{
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
(error ? ER_WARN_DATA_OUT_OF_RANGE : WARN_DATA_TRUNCATED), 1);
@@ -4384,16 +4415,10 @@ void Field_double::sql_type(String &res) const
2038-01-01 00:00:00 UTC stored as number of seconds since Unix
Epoch in UTC.
- Up to one of timestamps columns in the table can be automatically
- set on row update and/or have NOW() as default value.
- TABLE::timestamp_field points to Field object for such timestamp with
- auto-set-on-update. TABLE::time_stamp holds offset in record + 1 for this
- field, and is used by handler code which performs updates required.
-
Actually SQL-99 says that we should allow niladic functions (like NOW())
- as defaults for any field. Current limitations (only NOW() and only
- for one TIMESTAMP field) are because of restricted binary .frm format
- and should go away in the future.
+  as defaults for any field. The current limitations (only NOW() and only
+  for TIMESTAMP and DATETIME fields) are because of the restricted binary
+  .frm format and should go away in the future.
Also because of this limitation of binary .frm format we use 5 different
unireg_check values with TIMESTAMP field to distinguish various cases of
@@ -4434,10 +4459,12 @@ Field_timestamp::Field_timestamp(uchar *ptr_arg, uint32 len_arg,
{
/* For 4.0 MYD and 4.0 InnoDB compatibility */
flags|= UNSIGNED_FLAG | BINARY_FLAG;
- if (unireg_check != NONE && !share->timestamp_field)
+ if (unireg_check != NONE)
{
- /* This timestamp has auto-update */
- share->timestamp_field= this;
+ /*
+      We set TIMESTAMP_FLAG in flags to indicate to the client that
+      this field will be automatically updated on insert.
+ */
flags|= TIMESTAMP_FLAG;
if (unireg_check != TIMESTAMP_DN_FIELD)
flags|= ON_UPDATE_NOW_FLAG;
@@ -4445,40 +4472,6 @@ Field_timestamp::Field_timestamp(uchar *ptr_arg, uint32 len_arg,
}
-/**
- Get auto-set type for TIMESTAMP field.
-
- Returns value indicating during which operations this TIMESTAMP field
- should be auto-set to current timestamp.
-*/
-timestamp_auto_set_type Field_timestamp::get_auto_set_type() const
-{
- switch (unireg_check)
- {
- case TIMESTAMP_DN_FIELD:
- return TIMESTAMP_AUTO_SET_ON_INSERT;
- case TIMESTAMP_UN_FIELD:
- return TIMESTAMP_AUTO_SET_ON_UPDATE;
- case TIMESTAMP_OLD_FIELD:
- /*
- Although we can have several such columns in legacy tables this
- function should be called only for first of them (i.e. the one
- having auto-set property).
- */
- DBUG_ASSERT(table->timestamp_field == this);
- /* Fall-through */
- case TIMESTAMP_DNUN_FIELD:
- return TIMESTAMP_AUTO_SET_ON_BOTH;
- default:
- /*
- Normally this function should not be called for TIMESTAMPs without
- auto-set property.
- */
- DBUG_ASSERT(0);
- return TIMESTAMP_NO_AUTO_SET;
- }
-}
-
my_time_t Field_timestamp::get_timestamp(ulong *sec_part) const
{
ASSERT_COLUMN_MARKED_FOR_READ;
@@ -4528,10 +4521,11 @@ int Field_timestamp::store_TIME_with_warning(THD *thd, MYSQL_TIME *l_time,
int Field_timestamp::store_time_dec(MYSQL_TIME *ltime, uint dec)
{
- THD *thd= table->in_use;
int unused;
MYSQL_TIME l_time= *ltime;
ErrConvTime str(ltime);
+ THD *thd= get_thd();
+
bool valid= !check_date(&l_time, pack_time(&l_time) != 0,
(thd->variables.sql_mode & MODE_NO_ZERO_DATE) |
MODE_NO_ZERO_IN_DATE, &unused);
@@ -4546,7 +4540,7 @@ int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs)
int error;
int have_smth_to_conv;
ErrConvString str(from, len, cs);
- THD *thd= table->in_use;
+ THD *thd= get_thd();
/* We don't want to store invalid or fuzzy datetime values in TIMESTAMP */
have_smth_to_conv= (str_to_datetime(cs, from, len, &l_time,
@@ -4563,7 +4557,7 @@ int Field_timestamp::store(double nr)
MYSQL_TIME l_time;
int error;
ErrConvDouble str(nr);
- THD *thd= table->in_use;
+ THD *thd= get_thd();
longlong tmp= double_to_datetime(nr, &l_time, (thd->variables.sql_mode &
MODE_NO_ZERO_DATE) |
@@ -4577,7 +4571,7 @@ int Field_timestamp::store(longlong nr, bool unsigned_val)
MYSQL_TIME l_time;
int error;
ErrConvInteger str(nr);
- THD *thd= table->in_use;
+ THD *thd= get_thd();
/* We don't want to store invalid or fuzzy datetime values in TIMESTAMP */
longlong tmp= number_to_datetime(nr, 0, &l_time, (thd->variables.sql_mode &
@@ -4669,7 +4663,7 @@ String *Field_timestamp::val_str(String *val_buffer, String *val_ptr)
bool Field_timestamp::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
{
- THD *thd= table->in_use;
+ THD *thd= get_thd();
thd->time_zone_used= 1;
ulong sec_part;
my_time_t temp= get_timestamp(&sec_part);
@@ -4722,12 +4716,40 @@ void Field_timestamp::sql_type(String &res) const
int Field_timestamp::set_time()
{
- THD *thd= table->in_use;
+ THD *thd= get_thd();
set_notnull();
store_TIME(thd->query_start(), 0);
return 0;
}
+/**
+ Mark the field as having an explicit default value.
+
+ @param value if available, the value that the field is being set to
+
+ @note
+ Fields that have an explicit default value should not be updated
+ automatically via the DEFAULT or ON UPDATE functions. The functions
+ that deal with data change functionality (INSERT/UPDATE/LOAD),
+ determine if there is an explicit value for each field before performing
+ the data change, and call this method to mark the field.
+
+  For timestamp columns, the only cases where a column is not marked
+  as having been given a value are:
+  - It's explicitly assigned with DEFAULT
+  - We assign NULL to a timestamp field that is defined as NOT NULL.
+  This is how MySQL has worked since its start.
+*/
+
+void Field_timestamp::set_explicit_default(Item *value)
+{
+ if (((value->type() == Item::DEFAULT_VALUE_ITEM &&
+ !((Item_default_value*)value)->arg) ||
+ (!maybe_null() && value->is_null())))
+ return;
+ set_has_explicit_value();
+}
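+
+/*
+  Illustrative examples (not part of this patch), assuming a column
+  "ts TIMESTAMP NOT NULL DEFAULT NOW() ON UPDATE NOW()" in table t:
+
+    UPDATE t SET a=1, ts=DEFAULT;   -- ts not marked: ON UPDATE NOW() fires
+    UPDATE t SET a=1, ts=NULL;      -- NOT NULL column: not marked either
+    UPDATE t SET a=1, ts='2001-01-01 00:00:00';  -- marked: no auto-update
+*/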
+
void Field_timestamp_hires::sql_type(String &res) const
{
CHARSET_INFO *cs=res.charset();
@@ -4871,7 +4893,7 @@ int Field_timestamp_hires::store_decimal(const my_decimal *d)
int error;
MYSQL_TIME ltime;
longlong tmp;
- THD *thd= table->in_use;
+ THD *thd= get_thd();
ErrConvDecimal str(d);
if (my_decimal2seconds(d, &nr, &sec_part))
@@ -4889,7 +4911,7 @@ int Field_timestamp_hires::store_decimal(const my_decimal *d)
int Field_timestamp_hires::set_time()
{
- THD *thd= table->in_use;
+ THD *thd= get_thd();
set_notnull();
store_TIME(thd->query_start(), thd->query_start_sec_part());
return 0;
@@ -5008,7 +5030,7 @@ int Field_temporal::store(const char *from,uint len,CHARSET_INFO *cs)
MYSQL_TIME ltime;
int error;
enum enum_mysql_timestamp_type func_res;
- THD *thd= table->in_use;
+ THD *thd= get_thd();
ErrConvString str(from, len, cs);
func_res= str_to_datetime(cs, from, len, &ltime,
@@ -5025,7 +5047,7 @@ int Field_temporal::store(double nr)
{
int error= 0;
MYSQL_TIME ltime;
- THD *thd= table->in_use;
+ THD *thd= get_thd();
ErrConvDouble str(nr);
longlong tmp= double_to_datetime(nr, &ltime,
@@ -5043,7 +5065,7 @@ int Field_temporal::store(longlong nr, bool unsigned_val)
int error;
MYSQL_TIME ltime;
longlong tmp;
- THD *thd= table->in_use;
+ THD *thd= get_thd();
ErrConvInteger str(nr);
tmp= number_to_datetime(nr, 0, &ltime, (TIME_FUZZY_DATE |
@@ -5108,7 +5130,7 @@ int Field_time::store(const char *from,uint len,CHARSET_INFO *cs)
int was_cut;
int have_smth_to_conv=
str_to_time(cs, from, len, &ltime,
- table->in_use->variables.sql_mode &
+ get_thd()->variables.sql_mode &
(MODE_NO_ZERO_DATE | MODE_NO_ZERO_IN_DATE |
MODE_INVALID_DATES),
&was_cut) > MYSQL_TIMESTAMP_ERROR;
@@ -5214,7 +5236,7 @@ String *Field_time::val_str(String *val_buffer,
bool Field_time::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
{
- THD *thd= table->in_use;
+ THD *thd= get_thd();
if (!(fuzzydate & (TIME_FUZZY_DATE|TIME_TIME_ONLY)))
{
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
@@ -5404,7 +5426,7 @@ int Field_year::store(const char *from, uint len,CHARSET_INFO *cs)
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
return 1;
}
- if (table->in_use->count_cuted_fields &&
+ if (get_thd()->count_cuted_fields &&
(error= check_int(cs, from, len, end, error)))
{
if (error == 1) /* empty or incorrect string */
@@ -5851,6 +5873,20 @@ void Field_datetime::sql_type(String &res) const
res.set_ascii(STRING_WITH_LEN("datetime"));
}
+
+int Field_datetime::set_time()
+{
+ THD *thd= table->in_use;
+ MYSQL_TIME now_time;
+ thd->variables.time_zone->gmt_sec_to_TIME(&now_time, thd->query_start());
+ now_time.second_part= thd->query_start_sec_part();
+ set_notnull();
+ store_TIME(&now_time);
+ thd->time_zone_used= 1;
+ return 0;
+}
+
+
void Field_datetime_hires::store_TIME(MYSQL_TIME *ltime)
{
ulonglong packed= sec_part_shift(pack_time(ltime), dec);
@@ -5864,7 +5900,7 @@ int Field_datetime_hires::store_decimal(const my_decimal *d)
int error;
MYSQL_TIME ltime;
longlong tmp;
- THD *thd= table->in_use;
+ THD *thd= get_thd();
ErrConvDecimal str(d);
if (my_decimal2seconds(d, &nr, &sec_part))
@@ -6001,7 +6037,9 @@ check_string_copy_error(Field_str *field,
{
const char *pos;
char tmp[32];
- THD *thd= field->table->in_use;
+ THD *thd;
+
+ thd= field->get_thd();
if (!(pos= well_formed_error_pos) &&
!(pos= cannot_convert_error_pos))
@@ -6043,11 +6081,12 @@ int
Field_longstr::report_if_important_data(const char *pstr, const char *end,
bool count_spaces)
{
- if ((pstr < end) && table->in_use->count_cuted_fields)
+ THD *thd= get_thd();
+ if ((pstr < end) && thd->count_cuted_fields)
{
if (test_if_important_data(field_charset, pstr, end))
{
- if (table->in_use->abort_on_warning)
+ if (thd->abort_on_warning)
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
else
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
@@ -6074,7 +6113,7 @@ int Field_string::store(const char *from,uint length,CHARSET_INFO *cs)
const char *from_end_pos;
/* See the comment for Field_long::store(long long) */
- DBUG_ASSERT(table->in_use == current_thd);
+ DBUG_ASSERT(!table || table->in_use == current_thd);
copy_length= well_formed_copy_nchars(field_charset,
(char*) ptr, field_length,
@@ -6120,7 +6159,7 @@ int Field_str::store(double nr)
if (error)
{
- if (table->in_use->abort_on_warning)
+ if (get_thd()->abort_on_warning)
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
else
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
@@ -6180,7 +6219,7 @@ double Field_string::val_real(void)
double result;
result= my_strntod(cs,(char*) ptr,field_length,&end,&error);
- if (!table->in_use->no_errors &&
+ if (!get_thd()->no_errors &&
(error || (field_length != (uint32)(end - (char*) ptr) &&
!check_if_only_end_space(cs, end,
(char*) ptr + field_length))))
@@ -6204,7 +6243,7 @@ longlong Field_string::val_int(void)
longlong result;
result= my_strntoll(cs, (char*) ptr,field_length,10,&end,&error);
- if (!table->in_use->no_errors &&
+ if (!get_thd()->no_errors &&
(error || (field_length != (uint32)(end - (char*) ptr) &&
!check_if_only_end_space(cs, end,
(char*) ptr + field_length))))
@@ -6224,9 +6263,9 @@ String *Field_string::val_str(String *val_buffer __attribute__((unused)),
{
ASSERT_COLUMN_MARKED_FOR_READ;
/* See the comment for Field_long::store(long long) */
- DBUG_ASSERT(table->in_use == current_thd);
+ DBUG_ASSERT(!table || table->in_use == current_thd);
uint length;
- if (table->in_use->variables.sql_mode &
+ if (get_thd()->variables.sql_mode &
MODE_PAD_CHAR_TO_FULL_LENGTH)
length= my_charpos(field_charset, ptr, ptr + field_length,
field_length / field_charset->mbmaxlen);
@@ -6243,7 +6282,7 @@ my_decimal *Field_string::val_decimal(my_decimal *decimal_value)
ASSERT_COLUMN_MARKED_FOR_READ;
int err= str2my_decimal(E_DEC_FATAL_ERROR, (char*) ptr, field_length,
charset(), decimal_value);
- if (!table->in_use->no_errors && err)
+ if (!get_thd()->no_errors && err)
{
ErrConvString errmsg((char*) ptr, field_length, charset());
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
@@ -6627,7 +6666,7 @@ double Field_varstring::val_real(void)
uint length= length_bytes == 1 ? (uint) *ptr : uint2korr(ptr);
result= my_strntod(cs, (char*)ptr+length_bytes, length, &end, &error);
- if (!table->in_use->no_errors &&
+ if (!get_thd()->no_errors &&
(error || (length != (uint)(end - (char*)ptr+length_bytes) &&
!check_if_only_end_space(cs, end, (char*)ptr+length_bytes+length))))
{
@@ -6650,7 +6689,7 @@ longlong Field_varstring::val_int(void)
longlong result= my_strntoll(cs, (char*) ptr+length_bytes, length, 10,
&end, &error);
- if (!table->in_use->no_errors &&
+ if (!get_thd()->no_errors &&
(error || (length != (uint)(end - (char*)ptr+length_bytes) &&
!check_if_only_end_space(cs, end, (char*)ptr+length_bytes+length))))
{
@@ -6679,7 +6718,7 @@ my_decimal *Field_varstring::val_decimal(my_decimal *decimal_value)
int error= str2my_decimal(E_DEC_FATAL_ERROR, (char*) ptr+length_bytes, length,
cs, decimal_value);
- if (!table->in_use->no_errors && error)
+ if (!get_thd()->no_errors && error)
{
push_numerical_conversion_warning(current_thd, (char*)ptr+length_bytes,
length, cs, "DECIMAL",
@@ -7660,7 +7699,7 @@ int Field_enum::store(const char *from,uint length,CHARSET_INFO *cs)
tmp=0;
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
}
- if (!table->in_use->count_cuted_fields)
+ if (!get_thd()->count_cuted_fields)
err= 0;
}
else
@@ -7684,7 +7723,7 @@ int Field_enum::store(longlong nr, bool unsigned_val)
if ((ulonglong) nr > typelib->count || nr == 0)
{
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
- if (nr != 0 || table->in_use->count_cuted_fields)
+ if (nr != 0 || get_thd()->count_cuted_fields)
{
nr= 0;
error= 1;
@@ -8214,7 +8253,7 @@ int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs)
{
set_rec_bits((1 << bit_len) - 1, bit_ptr, bit_ofs, bit_len);
memset(ptr, 0xff, bytes_in_rec);
- if (table->in_use->really_abort_on_warning())
+ if (get_thd()->really_abort_on_warning())
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
else
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
@@ -8350,7 +8389,9 @@ int Field_bit::cmp_max(const uchar *a, const uchar *b, uint max_len)
if ((flag= (int) (bits_a - bits_b)))
return flag;
}
- return memcmp(a, b, field_length);
+ if (!bytes_in_rec)
+ return 0;
+ return memcmp(a, b, bytes_in_rec);
}
@@ -8649,7 +8690,7 @@ int Field_bit_as_char::store(const char *from, uint length, CHARSET_INFO *cs)
memset(ptr, 0xff, bytes_in_rec);
if (bits)
*ptr&= ((1 << bits) - 1); /* set first uchar */
- if (table->in_use->really_abort_on_warning())
+ if (get_thd()->really_abort_on_warning())
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
else
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
@@ -8872,16 +8913,37 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
{
uint sign_len, allowed_type_modifier= 0;
ulong max_field_charlength= MAX_FIELD_CHARLENGTH;
+ const bool on_update_is_function=
+ (fld_on_update_value != NULL &&
+ fld_on_update_value->type() == Item::FUNC_ITEM);
DBUG_ENTER("Create_field::init()");
field= 0;
field_name= fld_name;
- def= fld_default_value;
flags= fld_type_modifier;
option_list= create_opt;
- unireg_check= (fld_type_modifier & AUTO_INCREMENT_FLAG ?
- Field::NEXT_NUMBER : Field::NONE);
+
+ if (fld_default_value != NULL && fld_default_value->type() == Item::FUNC_ITEM)
+ {
+ /* There is a function default for insertions. */
+ def= NULL;
+ unireg_check= (on_update_is_function ?
+ Field::TIMESTAMP_DNUN_FIELD : // for insertions and for updates.
+ Field::TIMESTAMP_DN_FIELD); // only for insertions.
+ }
+ else
+ {
+ /* No function default for insertions. Either NULL or a constant. */
+ def= fld_default_value;
+ if (on_update_is_function)
+ unireg_check= Field::TIMESTAMP_UN_FIELD; // function default for updates
+ else
+ unireg_check= ((fld_type_modifier & AUTO_INCREMENT_FLAG) != 0 ?
+ Field::NEXT_NUMBER : // Automatic increment.
+ Field::NONE);
+ }
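+
+  /*
+    Summary of the mapping above (illustrative), per column definition:
+      DEFAULT NOW() ON UPDATE NOW()  -> Field::TIMESTAMP_DNUN_FIELD
+      DEFAULT NOW()                  -> Field::TIMESTAMP_DN_FIELD
+      ON UPDATE NOW()                -> Field::TIMESTAMP_UN_FIELD
+      AUTO_INCREMENT                 -> Field::NEXT_NUMBER
+      anything else                  -> Field::NONE, def= constant or NULL
+  */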
+
decimals= fld_decimals ? (uint)atoi(fld_decimals) : 0;
if (decimals >= NOT_FIXED_DEC)
{
@@ -9104,44 +9166,6 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
}
length+= MAX_DATETIME_WIDTH + (length ? 1 : 0);
flags|= UNSIGNED_FLAG;
-
- if (fld_default_value)
- {
- /* Grammar allows only NOW() value for ON UPDATE clause */
- if (fld_default_value->type() == Item::FUNC_ITEM &&
- ((Item_func*)fld_default_value)->functype() == Item_func::NOW_FUNC)
- {
- unireg_check= (fld_on_update_value ? Field::TIMESTAMP_DNUN_FIELD:
- Field::TIMESTAMP_DN_FIELD);
- /*
- We don't need default value any longer moreover it is dangerous.
- Everything handled by unireg_check further.
- */
- def= 0;
- }
- else
- unireg_check= (fld_on_update_value ? Field::TIMESTAMP_UN_FIELD:
- Field::NONE);
- }
- else
- {
- /*
- If we have default TIMESTAMP NOT NULL column without explicit DEFAULT
- or ON UPDATE values then for the sake of compatiblity we should treat
- this column as having DEFAULT NOW() ON UPDATE NOW() (when we don't
- have another TIMESTAMP column with auto-set option before this one)
- or DEFAULT 0 (in other cases).
- So here we are setting TIMESTAMP_OLD_FIELD only temporary, and will
- replace this value by TIMESTAMP_DNUN_FIELD or NONE later when
- information about all TIMESTAMP fields in table will be availiable.
-
- If we have TIMESTAMP NULL column without explicit DEFAULT value
- we treat it as having DEFAULT NULL attribute.
- */
- unireg_check= (fld_on_update_value ? Field::TIMESTAMP_UN_FIELD :
- (flags & NOT_NULL_FLAG ? Field::TIMESTAMP_OLD_FIELD :
- Field::NONE));
- }
break;
case MYSQL_TYPE_DATE:
/* We don't support creation of MYSQL_TYPE_DATE anymore */
@@ -9607,11 +9631,18 @@ Create_field::Create_field(Field *old_field,Field *orig_field)
def=0;
char_length= length;
- if (!(flags & (NO_DEFAULT_VALUE_FLAG | BLOB_FLAG)) &&
- old_field->ptr && orig_field &&
- (sql_type != MYSQL_TYPE_TIMESTAMP || /* set def only if */
- old_field->table->timestamp_field != old_field || /* timestamp field */
- unireg_check == Field::TIMESTAMP_UN_FIELD)) /* has default val */
+ /*
+ Copy the default value from the column object orig_field, if:
+ 1) The column has a constant default value.
+ 2) The column type is not a BLOB type.
+ 3) The original column (old_field) was properly initialized with a record
+ buffer pointer.
+ 4) The original column doesn't have a default function to auto-initialize
+ the column on INSERT
+ */
+ if (!(flags & (NO_DEFAULT_VALUE_FLAG | BLOB_FLAG)) && // 1) 2)
+ old_field->ptr && orig_field && // 3)
+ !old_field->has_insert_default_function()) // 4)
{
char buff[MAX_FIELD_WIDTH];
String tmp(buff,sizeof(buff), charset);
@@ -9765,7 +9796,7 @@ void Field::set_datetime_warning(MYSQL_ERROR::enum_warning_level level,
uint code, const ErrConv *str,
timestamp_type ts_type, int cuted_increment)
{
- THD *thd= table->in_use;
+ THD *thd= get_thd();
if (thd->really_abort_on_warning() && level >= MYSQL_ERROR::WARN_LEVEL_WARN)
make_truncated_value_warning(thd, level, str, ts_type, field_name);
else
@@ -9795,3 +9826,29 @@ key_map Field::get_possible_keys()
return (table->pos_in_table_list->is_materialized_derived() ?
part_of_key : key_start);
}
+
+
+/**
+ Mark the field as having an explicit default value.
+
+ @param value if available, the value that the field is being set to
+
+ @note
+ Fields that have an explicit default value should not be updated
+ automatically via the DEFAULT or ON UPDATE functions. The functions
+ that deal with data change functionality (INSERT/UPDATE/LOAD),
+ determine if there is an explicit value for each field before performing
+ the data change, and call this method to mark the field.
+
+ If the 'value' parameter is NULL, then the field is marked unconditionally
+ as having an explicit value. If 'value' is not NULL, then it can be further
+ analyzed to check if it really should count as a value.
+*/
+
+void Field::set_explicit_default(Item *value)
+{
+  if (value != NULL &&  /* per the note above: NULL marks unconditionally */
+      value->type() == Item::DEFAULT_VALUE_ITEM &&
+      !((Item_default_value*)value)->arg)
+ return;
+ set_has_explicit_value();
+}
diff --git a/sql/field.h b/sql/field.h
index a6382026d97..e832928b114 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -36,6 +36,8 @@ class Protocol;
class Create_field;
class Relay_log_info;
class Field;
+class Column_statistics;
+class Column_statistics_collected;
enum enum_check_fields
{
@@ -66,6 +68,8 @@ enum Derivation
/* The length of the header part for each virtual column in the .frm file */
#define FRM_VCOL_HEADER_SIZE(b) (3 + test(b))
+class Count_distinct_field;
+
struct ha_field_option_struct;
struct st_cache_field;
@@ -216,6 +220,19 @@ public:
*/
bool is_created_from_null_item;
+ bool is_stat_field; /* TRUE in Field objects created for column min/max values */
+
+ /*
+ This structure is used for statistical data on the column
+ that has been read from the statistical table column_stat
+ */
+ Column_statistics *read_stats;
+ /*
+ This structure is used for statistical data on the column that
+ is collected by the function collect_statistics_for_table
+ */
+ Column_statistics_collected *collected_stats;
+
/*
This is additional data provided for any computed(virtual) field.
In particular it includes a pointer to the item by which this field
@@ -308,6 +325,26 @@ public:
virtual uint32 data_length() { return pack_length(); }
virtual uint32 sort_length() const { return pack_length(); }
+ /*
+    Get the number of bytes occupied by the value in the field.
+    CHAR values are stripped of trailing spaces.
+    Variable-length values are stripped of their length bytes.
+ */
+ virtual uint32 value_length()
+ {
+ uint len;
+ if (!zero_pack() &&
+ (type() == MYSQL_TYPE_STRING &&
+ (len= pack_length()) >= 4 && len < 256))
+ {
+ uchar *str, *end;
+ for (str= ptr, end= str+len; end > str && end[-1] == ' '; end--) {}
+ len=(uint) (end-str);
+ return len;
+ }
+ return data_length();
+ }
+
/**
Get the maximum size of the data in packed format.
@@ -329,6 +366,46 @@ public:
*null_ptr= ((*null_ptr & (uchar) ~null_bit) |
(null_ptr[l_offset] & null_bit));
}
+
+ bool has_insert_default_function() const
+ {
+ return unireg_check == TIMESTAMP_DN_FIELD ||
+ unireg_check == TIMESTAMP_DNUN_FIELD;
+ }
+
+ bool has_update_default_function() const
+ {
+ return unireg_check == TIMESTAMP_UN_FIELD ||
+ unireg_check == TIMESTAMP_DNUN_FIELD;
+ }
+
+ /*
+    Mark the field as having a value supplied by the client, so that it
+    will not be auto-updated.
+ */
+ void set_has_explicit_value()
+ {
+ flags|= HAS_EXPLICIT_VALUE;
+ }
+
+ virtual void set_explicit_default(Item *value);
+
+ /**
+ Evaluates the @c INSERT default function and stores the result in the
+ field. If no such function exists for the column, or the function is not
+ valid for the column's data type, invoking this function has no effect.
+ */
+ virtual int evaluate_insert_default_function() { return 0; }
+
+
+ /**
+ Evaluates the @c UPDATE default function, if one exists, and stores the
+ result in the record buffer. If no such function exists for the column,
+ or the function is not valid for the column's data type, invoking this
+ function has no effect.
+ */
+ virtual int evaluate_update_default_function() { return 0; }
+
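+  /*
+    Expected calling protocol (sketch, based on the note above): the
+    data-change code (INSERT/UPDATE/LOAD) first calls
+    set_explicit_default() for each assigned column, then invokes
+    evaluate_insert_default_function() / evaluate_update_default_function()
+    only for fields that did not get HAS_EXPLICIT_VALUE set.
+  */
+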
virtual bool binary() const { return 1; }
virtual bool zero_pack() const { return 1; }
virtual enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; }
@@ -349,6 +426,36 @@ public:
{ return cmp(a, b); }
virtual int key_cmp(const uchar *str, uint length)
{ return cmp(ptr,str); }
+ /*
+ Update the value m of the 'min_val' field with the current value v
+ of this field if force_update is set to TRUE or if v < m.
+ Return TRUE if the value has been updated.
+ */
+ virtual bool update_min(Field *min_val, bool force_update)
+ {
+ bool update_fl= force_update || cmp(ptr, min_val->ptr) < 0;
+ if (update_fl)
+ {
+ min_val->set_notnull();
+ memcpy(min_val->ptr, ptr, pack_length());
+ }
+ return update_fl;
+ }
+ /*
+ Update the value m of the 'max_val' field with the current value v
+ of this field if force_update is set to TRUE or if v > m.
+ Return TRUE if the value has been updated.
+ */
+ virtual bool update_max(Field *max_val, bool force_update)
+ {
+ bool update_fl= force_update || cmp(ptr, max_val->ptr) > 0;
+ if (update_fl)
+ {
+ max_val->set_notnull();
+ memcpy(max_val->ptr, ptr, pack_length());
+ }
+ return update_fl;
+ }
virtual uint decimals() const { return 0; }
/*
Caller beware: sql_type can change str.Ptr, so check
@@ -384,6 +491,8 @@ public:
*/
inline bool real_maybe_null(void) { return null_ptr != 0; }
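+  /*
+    THD to use for warnings and session state: the table's user when the
+    field is attached to a TABLE, else current_thd (e.g. for stand-alone
+    Field objects created for column statistics).
+  */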
+ inline THD *get_thd() { return table ? table->in_use : current_thd; }
+
enum {
LAST_NULL_BYTE_UNDEF= 0
};
@@ -421,6 +530,9 @@ public:
uchar *new_ptr, uchar *new_null_ptr,
uint new_null_bit);
Field *clone(MEM_ROOT *mem_root, TABLE *new_table);
+ Field *clone(MEM_ROOT *mem_root, TABLE *new_table, my_ptrdiff_t diff,
+ bool stat_flag= FALSE);
+ Field *clone(MEM_ROOT *mem_root, my_ptrdiff_t diff);
inline void move_field(uchar *ptr_arg,uchar *null_ptr_arg,uchar null_bit_arg)
{
ptr=ptr_arg; null_ptr=null_ptr_arg; null_bit=null_bit_arg;
@@ -1240,12 +1352,26 @@ public:
virtual int set_time();
virtual void set_default()
{
- if (table->timestamp_field == this &&
- unireg_check != TIMESTAMP_UN_FIELD)
+ if (has_insert_default_function())
set_time();
else
Field::set_default();
}
+ virtual void set_explicit_default(Item *value);
+ virtual int evaluate_insert_default_function()
+ {
+ int res= 0;
+ if (has_insert_default_function())
+ res= set_time();
+ return res;
+ }
+ virtual int evaluate_update_default_function()
+ {
+ int res= 0;
+ if (has_update_default_function())
+ res= set_time();
+ return res;
+ }
 /* Get TIMESTAMP field value as seconds since the beginning of the Unix Epoch */
virtual my_time_t get_timestamp(ulong *sec_part) const;
virtual void store_TIME(my_time_t timestamp, ulong sec_part)
@@ -1253,7 +1379,6 @@ public:
int4store(ptr,timestamp);
}
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
- timestamp_auto_set_type get_auto_set_type() const;
uchar *pack(uchar *to, const uchar *from,
uint max_length __attribute__((unused)))
{
@@ -1504,6 +1629,28 @@ public:
void sql_type(String &str) const;
bool zero_pack() const { return 1; }
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ virtual int set_time();
+ virtual void set_default()
+ {
+ if (has_insert_default_function())
+ set_time();
+ else
+ Field::set_default();
+ }
+ virtual int evaluate_insert_default_function()
+ {
+ int res= 0;
+ if (has_insert_default_function())
+ res= set_time();
+ return res;
+ }
+ virtual int evaluate_update_default_function()
+ {
+ int res= 0;
+ if (has_update_default_function())
+ res= set_time();
+ return res;
+ }
uchar *pack(uchar* to, const uchar *from,
uint max_length __attribute__((unused)))
{
@@ -1812,6 +1959,10 @@ public:
int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0L);
int key_cmp(const uchar *,const uchar*);
int key_cmp(const uchar *str, uint length);
+ /* Never update the value of min_val for a blob field */
+ bool update_min(Field *min_val, bool force_update) { return FALSE; }
+ /* Never update the value of max_val for a blob field */
+ bool update_max(Field *max_val, bool force_update) { return FALSE; }
uint32 key_length() const { return 0; }
void sort_string(uchar *buff,uint length);
uint32 pack_length() const
@@ -1829,6 +1980,7 @@ public:
{ return (uint32) (packlength); }
uint row_pack_length() { return pack_length_no_ptr(); }
uint32 sort_length() const;
+ uint32 value_length() { return get_length(); }
virtual uint32 max_data_length() const
{
return (uint32) (((ulonglong) 1 << (packlength*8)) -1);
@@ -2100,6 +2252,28 @@ public:
{ return cmp_binary((uchar *) a, (uchar *) b); }
int key_cmp(const uchar *str, uint length);
int cmp_offset(uint row_offset);
+ bool update_min(Field *min_val, bool force_update)
+ {
+ longlong val= val_int();
+ bool update_fl= force_update || val < min_val->val_int();
+ if (update_fl)
+ {
+ min_val->set_notnull();
+ min_val->store(val, FALSE);
+ }
+ return update_fl;
+ }
+ bool update_max(Field *max_val, bool force_update)
+ {
+ longlong val= val_int();
+ bool update_fl= force_update || val > max_val->val_int();
+ if (update_fl)
+ {
+ max_val->set_notnull();
+ max_val->store(val, FALSE);
+ }
+ return update_fl;
+ }
void get_image(uchar *buff, uint length, CHARSET_INFO *cs)
{ get_key_image(buff, length, itRAW); }
void set_image(const uchar *buff,uint length, CHARSET_INFO *cs)
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 9af57f669e4..f31afc226cc 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -34,6 +34,9 @@
#include "sql_base.h" // update_virtual_fields
#include "sql_test.h" // TEST_filesort
#include "opt_range.h" // SQL_SELECT
+#include "bounded_queue.h"
+#include "filesort_utils.h"
+#include "sql_select.h"
#include "log_slow.h"
#include "debug_sync.h"
@@ -46,22 +49,63 @@ if (my_b_write((file),(uchar*) (from),param->ref_length)) \
static uchar *read_buffpek_from_file(IO_CACHE *buffer_file, uint count,
uchar *buf);
-static ha_rows find_all_keys(SORTPARAM *param,SQL_SELECT *select,
- uchar * *sort_keys, uchar *sort_keys_buf,
- IO_CACHE *buffer_file, IO_CACHE *tempfile);
-static int write_keys(SORTPARAM *param,uchar * *sort_keys,
- uint count, IO_CACHE *buffer_file, IO_CACHE *tempfile);
-static void make_sortkey(SORTPARAM *param,uchar *to, uchar *ref_pos);
-static void register_used_fields(SORTPARAM *param);
-static bool save_index(SORTPARAM *param,uchar **sort_keys, uint count,
- FILESORT_INFO *table_sort);
+static ha_rows find_all_keys(Sort_param *param,SQL_SELECT *select,
+ Filesort_info *fs_info,
+ IO_CACHE *buffer_file,
+ IO_CACHE *tempfile,
+ Bounded_queue<uchar, uchar> *pq,
+ ha_rows *found_rows);
+static int write_keys(Sort_param *param, Filesort_info *fs_info,
+ uint count, IO_CACHE *buffer_file, IO_CACHE *tempfile);
+static void make_sortkey(Sort_param *param, uchar *to, uchar *ref_pos);
+static void register_used_fields(Sort_param *param);
+static bool save_index(Sort_param *param, uint count,
+ Filesort_info *table_sort);
static uint suffix_length(ulong string_length);
static uint sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
bool *multi_byte_charset);
-static SORT_ADDON_FIELD *get_addon_fields(THD *thd, Field **ptabfield,
+static SORT_ADDON_FIELD *get_addon_fields(ulong max_length_for_sort_data,
+ Field **ptabfield,
uint sortlength, uint *plength);
static void unpack_addon_fields(struct st_sort_addon_field *addon_field,
uchar *buff, uchar *buff_end);
+static bool check_if_pq_applicable(Sort_param *param, Filesort_info *info,
+ TABLE *table,
+ ha_rows records, ulong memory_available);
+
+
+void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
+ ulong max_length_for_sort_data,
+ ha_rows maxrows, bool sort_positions)
+{
+ sort_length= sortlen;
+ ref_length= table->file->ref_length;
+ if (!(table->file->ha_table_flags() & HA_FAST_KEY_READ) &&
+ !table->fulltext_searched && !sort_positions)
+ {
+ /*
+ Get the descriptors of all fields whose values are appended
+      to sorted fields and get their total length in addon_length.
+ */
+ addon_field= get_addon_fields(max_length_for_sort_data,
+ table->field, sort_length, &addon_length);
+ }
+ if (addon_field)
+ res_length= addon_length;
+ else
+ {
+ res_length= ref_length;
+ /*
+ The reference to the record is considered
+ as an additional sorted field
+ */
+ sort_length+= ref_length;
+ }
+ rec_length= sort_length + addon_length;
+ max_rows= maxrows;
+}
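+
+/*
+  Resulting sort record layout (illustrative):
+    with addon fields:    [sort key, sort_length][addon fields, addon_length]
+    without addon fields: [sort key][rowid, ref_length]
+  In both cases rec_length= sort_length + addon_length; in the rowid case
+  ref_length has already been folded into sort_length above.
+*/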
+
+
/**
Sort a table.
Creates a set of pointers that can be used to read the rows
@@ -74,15 +118,17 @@ static void unpack_addon_fields(struct st_sort_addon_field *addon_field,
The result set is stored in table->io_cache or
table->record_pointers.
- @param thd Current thread
- @param table Table to sort
- @param sortorder How to sort the table
- @param s_length Number of elements in sortorder
- @param select condition to apply to the rows
- @param max_rows Return only this many rows
- @param sort_positions Set to 1 if we want to force sorting by position
- (Needed by UPDATE/INSERT or ALTER TABLE)
- @param examined_rows Store number of examined rows here
+ @param thd Current thread
+ @param table Table to sort
+ @param sortorder How to sort the table
+ @param s_length Number of elements in sortorder
+ @param select Condition to apply to the rows
+ @param max_rows Return only this many rows
+ @param sort_positions Set to TRUE if we want to force sorting by position
+ (Needed by UPDATE/INSERT or ALTER TABLE or
+ when rowids are required by executor)
+ @param[out] examined_rows Store number of examined rows here
+ @param[out] found_rows Store the number of found rows here
@note
If we sort by position (like if sort_positions is 1) filesort() will
@@ -92,31 +138,30 @@ static void unpack_addon_fields(struct st_sort_addon_field *addon_field,
HA_POS_ERROR Error
@retval
\# Number of rows
- @retval
- examined_rows will be set to number of examined rows
*/
ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
SQL_SELECT *select, ha_rows max_rows,
- bool sort_positions, ha_rows *examined_rows)
+ bool sort_positions,
+ ha_rows *examined_rows,
+ ha_rows *found_rows)
{
int error;
ulong memory_available= thd->variables.sortbuff_size;
- ulong min_sort_memory;
- ulong sort_buff_sz;
uint maxbuffer;
BUFFPEK *buffpek;
ha_rows num_rows= HA_POS_ERROR;
- uchar **sort_keys= 0;
IO_CACHE tempfile, buffpek_pointers, *outfile;
- SORTPARAM param;
+ Sort_param param;
bool multi_byte_charset;
+ Bounded_queue<uchar, uchar> pq;
+
DBUG_ENTER("filesort");
DBUG_EXECUTE("info",TEST_filesort(sortorder,s_length););
#ifdef SKIP_DBUG_IN_FILESORT
DBUG_PUSH(""); /* No DBUG here */
#endif
- FILESORT_INFO table_sort;
+ Filesort_info table_sort= table->sort;
TABLE_LIST *tab= table->pos_in_table_list;
Item_subselect *subselect= tab ? tab->containing_subselect() : 0;
@@ -134,7 +179,6 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
QUICK_INDEX_MERGE_SELECT. Work with a copy and put it back at the end
when index_merge select has finished with it.
*/
- memcpy(&table_sort, &table->sort, sizeof(FILESORT_INFO));
table->sort.io_cache= NULL;
DBUG_ASSERT(table_sort.record_pointers == NULL);
@@ -143,43 +187,22 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
my_b_clear(&buffpek_pointers);
buffpek=0;
error= 1;
- bzero((char*) &param,sizeof(param));
- param.sort_length= sortlength(thd, sortorder, s_length, &multi_byte_charset);
- param.ref_length= table->file->ref_length;
- if (!(table->file->ha_table_flags() & HA_FAST_KEY_READ) &&
- !table->fulltext_searched && !sort_positions)
- {
- /*
- Get the descriptors of all fields whose values are appended
- to sorted fields and get its total length in param.spack_length.
- */
- param.addon_field= get_addon_fields(thd, table->field,
- param.sort_length,
- &param.addon_length);
- }
+
+ param.init_for_filesort(sortlength(thd, sortorder, s_length,
+ &multi_byte_charset),
+ table,
+ thd->variables.max_length_for_sort_data,
+ max_rows, sort_positions);
table_sort.addon_buf= 0;
table_sort.addon_length= param.addon_length;
table_sort.addon_field= param.addon_field;
table_sort.unpack= unpack_addon_fields;
- if (param.addon_field)
- {
- param.res_length= param.addon_length;
- if (!(table_sort.addon_buf= (uchar *) my_malloc(param.addon_length,
- MYF(MY_WME))))
- goto err;
- }
- else
- {
- param.res_length= param.ref_length;
- /*
- The reference to the record is considered
- as an additional sorted field
- */
- param.sort_length+= param.ref_length;
- }
- param.rec_length= param.sort_length+param.addon_length;
- param.max_rows= max_rows;
+ if (param.addon_field &&
+ !(table_sort.addon_buf=
+ (uchar *) my_malloc(param.addon_length, MYF(MY_WME |
+ MY_THREAD_SPECIFIC))))
+ goto err;
if (select && select->quick)
status_var_increment(thd->status_var.filesort_range_count);
@@ -191,25 +214,58 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
num_rows= table->file->estimate_rows_upper_bound();
if (multi_byte_charset &&
- !(param.tmp_buffer= (char*) my_malloc(param.sort_length,MYF(MY_WME))))
+ !(param.tmp_buffer= (char*) my_malloc(param.sort_length,
+ MYF(MY_WME | MY_THREAD_SPECIFIC))))
goto err;
- min_sort_memory= max(MIN_SORT_MEMORY, param.sort_length * MERGEBUFF2);
- set_if_bigger(min_sort_memory, sizeof(BUFFPEK*)*MERGEBUFF2);
- if (!table_sort.sort_keys)
+ if (check_if_pq_applicable(&param, &table_sort,
+ table, num_rows, memory_available))
+ {
+ DBUG_PRINT("info", ("filesort PQ is applicable"));
+ const size_t compare_length= param.sort_length;
+ if (pq.init(param.max_rows,
+ true, // max_at_top
+ NULL, // compare_function
+ compare_length,
+ &make_sortkey, &param, table_sort.get_sort_keys()))
+ {
+ /*
+ If we fail to init pq, we have to give up:
+ out of memory means my_malloc() will call my_error().
+ */
+ DBUG_PRINT("info", ("failed to allocate PQ"));
+ table_sort.free_sort_buffer();
+ DBUG_ASSERT(thd->is_error());
+ goto err;
+ }
+ // For PQ queries (with limit) we initialize all pointers.
+ table_sort.init_record_pointers();
+ }
+ else
{
+ DBUG_PRINT("info", ("filesort PQ is not applicable"));
+
+ ulong min_sort_memory= max(MIN_SORT_MEMORY, param.sort_length*MERGEBUFF2);
+ set_if_bigger(min_sort_memory, sizeof(BUFFPEK*)*MERGEBUFF2);
while (memory_available >= min_sort_memory)
{
ulong keys= memory_available / (param.rec_length + sizeof(char*));
- table_sort.keys= (uint) min(num_rows, keys);
- sort_buff_sz= table_sort.keys*(param.rec_length+sizeof(char*));
- set_if_bigger(sort_buff_sz, param.rec_length * MERGEBUFF2);
-
- DBUG_EXECUTE_IF("make_sort_keys_alloc_fail",
- DBUG_SET("+d,simulate_out_of_memory"););
-
- if ((table_sort.sort_keys=
- (uchar**) my_malloc(sort_buff_sz, MYF(0))))
+ param.max_keys_per_buffer= (uint) min(num_rows, keys);
+ if (table_sort.get_sort_keys())
+ {
+ // If we have already allocated a buffer, it had better have the same size!
+ if (!table_sort.check_sort_buffer_properties(param.max_keys_per_buffer,
+ param.rec_length))
+ {
+ /*
+ table->sort will still have a pointer to the same buffer,
+ but that will be overwritten by the assignment below.
+ */
+ table_sort.free_sort_buffer();
+ }
+ }
+ table_sort.alloc_sort_buffer(param.max_keys_per_buffer, param.rec_length);
+ if (table_sort.get_sort_keys())
break;
ulong old_memory_available= memory_available;
memory_available= memory_available/4*3;
@@ -217,40 +273,37 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
old_memory_available > min_sort_memory)
memory_available= min_sort_memory;
}
+ if (memory_available < min_sort_memory)
+ {
+ my_error(ER_OUT_OF_SORTMEMORY,MYF(ME_ERROR + ME_FATALERROR));
+ goto err;
+ }
}
- sort_keys= table_sort.sort_keys;
- param.keys= table_sort.keys;
- if (memory_available < min_sort_memory)
- {
- my_error(ER_OUT_OF_SORTMEMORY,MYF(ME_ERROR + ME_FATALERROR));
- goto err;
- }
if (open_cached_file(&buffpek_pointers,mysql_tmpdir,TEMP_PREFIX,
- DISK_BUFFER_SIZE, MYF(ME_ERROR | MY_WME)))
+ DISK_BUFFER_SIZE, MYF(MY_WME)))
goto err;
param.sort_form= table;
param.end=(param.local_sortorder=sortorder)+s_length;
- num_rows= find_all_keys(&param,
- select,
- sort_keys,
- (uchar *)(sort_keys+param.keys),
+ num_rows= find_all_keys(&param, select,
+ &table_sort,
&buffpek_pointers,
- &tempfile);
+ &tempfile,
+ pq.is_initialized() ? &pq : NULL,
+ found_rows);
if (num_rows == HA_POS_ERROR)
goto err;
+
maxbuffer= (uint) (my_b_tell(&buffpek_pointers)/sizeof(*buffpek));
if (maxbuffer == 0) // The whole set is in memory
{
- if (save_index(&param,sort_keys,(uint) num_rows, &table_sort))
+ if (save_index(&param, (uint) num_rows, &table_sort))
goto err;
}
else
{
- thd->query_plan_flags|= QPLAN_FILESORT_DISK;
-
/* filesort cannot handle zero-length records during merge. */
DBUG_ASSERT(param.sort_length != 0);
@@ -269,7 +322,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
/* Open cached file if it isn't open */
if (! my_b_inited(outfile) &&
open_cached_file(outfile,mysql_tmpdir,TEMP_PREFIX,READ_RECORD_BUFFER,
- MYF(ME_ERROR | MY_WME)))
+ MYF(MY_WME)))
goto err;
if (reinit_io_cache(outfile,WRITE_CACHE,0L,0,0))
goto err;
@@ -278,18 +331,20 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
Use also the space previously used by string pointers in sort_buffer
for temporary key storage.
*/
- param.keys=((param.keys *
- (param.rec_length+sizeof(char*))) /
- param.rec_length - 1);
+ param.max_keys_per_buffer=((param.max_keys_per_buffer *
+ (param.rec_length + sizeof(char*))) /
+ param.rec_length - 1);
maxbuffer--; // Offset from 0
- if (merge_many_buff(&param,(uchar*) sort_keys,buffpek,&maxbuffer,
+ if (merge_many_buff(&param,
+ (uchar*) table_sort.get_sort_keys(),
+ buffpek,&maxbuffer,
&tempfile))
goto err;
if (flush_io_cache(&tempfile) ||
reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
goto err;
if (merge_index(&param,
- (uchar*) sort_keys,
+ (uchar*) table_sort.get_sort_keys(),
buffpek,
maxbuffer,
&tempfile,
@@ -304,12 +359,11 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
}
error= 0;
- err:
+ err:
my_free(param.tmp_buffer);
if (!subselect || !subselect->is_uncacheable())
{
- my_free(sort_keys);
- table_sort.sort_keys= 0;
+ table_sort.free_sort_buffer();
my_free(buffpek);
table_sort.buffpek= 0;
table_sort.buffpek_len= 0;
@@ -340,7 +394,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
thd->killed == ABORT_QUERY ? "" : thd->stmt_da->message());
if (global_system_variables.log_warnings > 1)
- {
+ {
sql_print_warning("%s, host: %s, user: %s, thread: %lu, query: %-.4096s",
ER_THD(thd, ER_FILSORT_ABORT),
thd->security_ctx->host_or_ip,
@@ -356,8 +410,13 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
#ifdef SKIP_DBUG_IN_FILESORT
DBUG_POP(); /* Ok to DBUG */
#endif
- memcpy(&table->sort, &table_sort, sizeof(FILESORT_INFO));
- DBUG_PRINT("exit",("num_rows: %ld", (long) num_rows));
+
+ // Assign the copy back!
+ table->sort= table_sort;
+
+ DBUG_PRINT("exit",
+ ("num_rows: %ld examined_rows: %ld found_rows: %ld",
+ (long) num_rows, (long) *examined_rows, (long) *found_rows));
MYSQL_FILESORT_DONE(error, num_rows);
DBUG_RETURN(error ? HA_POS_ERROR : num_rows);
} /* filesort */
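For reviewers tracking the signature change: filesort() now also reports the
pre-LIMIT row count through the new found_rows out-parameter. A hypothetical
call site under the new API (variable names illustrative, not from this patch)
would look like:

    ha_rows examined_rows= 0, found_rows= 0;
    ha_rows rows= filesort(thd, table, sortorder, s_length, select,
                           limit_rows,            /* max_rows */
                           false,                 /* sort_positions */
                           &examined_rows, &found_rows);
    if (rows == HA_POS_ERROR)
      { /* error has already been reported via my_error() */ }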
@@ -371,8 +430,7 @@ void filesort_free_buffers(TABLE *table, bool full)
if (full)
{
- my_free(table->sort.sort_keys);
- table->sort.sort_keys= NULL;
+ table->sort.free_sort_buffer();
my_free(table->sort.buffpek);
table->sort.buffpek= NULL;
table->sort.buffpek_len= 0;
@@ -397,7 +455,7 @@ static uchar *read_buffpek_from_file(IO_CACHE *buffpek_pointers, uint count,
if (count > UINT_MAX/sizeof(BUFFPEK))
return 0; /* sizeof(BUFFPEK)*count will overflow */
if (!tmp)
- tmp= (uchar *)my_malloc(length, MYF(MY_WME));
+ tmp= (uchar *)my_malloc(length, MYF(MY_WME | MY_THREAD_SPECIFIC));
if (tmp)
{
if (reinit_io_cache(buffpek_pointers,READ_CACHE,0L,0,0) ||
@@ -461,8 +519,10 @@ static void dbug_print_record(TABLE *table, bool print_rowid)
}
#endif
+
/**
- Search after sort_keys and write them into tempfile.
+ Search for sort_keys, and write them into tempfile
+ (if we run out of space in the sort_keys buffer).
All produced sequences are guaranteed to be non-empty.
@param param Sorting parameter
@@ -471,19 +531,28 @@ static void dbug_print_record(TABLE *table, bool print_rowid)
@param buffpek_pointers File to write BUFFPEKs describing sorted segments
in tempfile.
@param tempfile File to write sorted sequences of sortkeys to.
+ @param pq If !NULL, use it for keeping top N elements
+ @param [out] found_rows The number of FOUND_ROWS().
+ For a query with LIMIT, this value will typically
+ be larger than the function return value.
@note
Basic idea:
@verbatim
while (get_next_sortkey())
{
- if (no free space in sort_keys buffers)
+ if (using priority queue)
+ push sort key into queue
+ else
{
- sort sort_keys buffer;
- dump sorted sequence to 'tempfile';
- dump BUFFPEK describing sequence location into 'buffpek_pointers';
+ if (no free space in sort_keys buffers)
+ {
+ sort sort_keys buffer;
+ dump sorted sequence to 'tempfile';
+ dump BUFFPEK describing sequence location into 'buffpek_pointers';
+ }
+ put sort key into 'sort_keys';
}
- put sort key into 'sort_keys';
}
if (sort_keys has some elements && dumped at least once)
sort-dump-dump as above;
@@ -497,10 +566,12 @@ static void dbug_print_record(TABLE *table, bool print_rowid)
HA_POS_ERROR on error.
*/
-static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
- uchar **sort_keys, uchar *sort_keys_buf,
+static ha_rows find_all_keys(Sort_param *param, SQL_SELECT *select,
+ Filesort_info *fs_info,
IO_CACHE *buffpek_pointers,
- IO_CACHE *tempfile)
+ IO_CACHE *tempfile,
+ Bounded_queue<uchar, uchar> *pq,
+ ha_rows *found_rows)
{
int error,flag,quick_select;
uint idx,indexpos,ref_length;
@@ -508,10 +579,9 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
my_off_t record;
TABLE *sort_form;
THD *thd= current_thd;
- volatile killed_state *killed= &thd->killed;
handler *file;
MY_BITMAP *save_read_set, *save_write_set, *save_vcol_set;
- uchar *next_sort_key= sort_keys_buf;
+
DBUG_ENTER("find_all_keys");
DBUG_PRINT("info",("using: %s",
(select ? select->quick ? "ranges" : "where":
@@ -525,10 +595,16 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
ref_pos= ref_buff;
quick_select=select && select->quick;
record=0;
+ *found_rows= 0;
flag= ((file->ha_table_flags() & HA_REC_NOT_IN_SEQ) || quick_select);
if (flag)
ref_pos= &file->ref[0];
next_pos=ref_pos;
+
+ DBUG_EXECUTE_IF("show_explain_in_find_all_keys",
+ dbug_serve_apcs(thd, 1);
+ );
+
if (!quick_select)
{
next_pos=(uchar*) 0; /* Find records in sequence */
@@ -540,7 +616,6 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
current_thd->variables.read_buff_size);
}
-
/* Remember original bitmaps */
save_read_set= sort_form->read_set;
save_write_set= sort_form->write_set;
@@ -594,7 +669,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
break;
}
- if (*killed)
+ if (thd->check_killed())
{
DBUG_PRINT("info",("Sort killed by user"));
if (!quick_select)
@@ -639,18 +714,23 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
if (write_record)
{
- if (idx == param->keys)
+ ++(*found_rows);
+ if (pq)
{
- if (write_keys(param, sort_keys,
- idx, buffpek_pointers, tempfile))
- DBUG_RETURN(HA_POS_ERROR);
- idx= 0;
- next_sort_key= sort_keys_buf;
- indexpos++;
+ pq->push(ref_pos);
+ idx= pq->num_elements();
+ }
+ else
+ {
+ if (idx == param->max_keys_per_buffer)
+ {
+ if (write_keys(param, fs_info, idx, buffpek_pointers, tempfile))
+ DBUG_RETURN(HA_POS_ERROR);
+ idx= 0;
+ indexpos++;
+ }
+ make_sortkey(param, fs_info->get_record_buffer(idx++), ref_pos);
}
- sort_keys[idx++]= next_sort_key;
- make_sortkey(param, next_sort_key, ref_pos);
- next_sort_key+= param->rec_length;
}
else
file->unlock_row();
@@ -679,12 +759,12 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */
}
if (indexpos && idx &&
- write_keys(param, sort_keys,
- idx, buffpek_pointers, tempfile))
+ write_keys(param, fs_info, idx, buffpek_pointers, tempfile))
DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */
const ha_rows retval=
my_b_inited(tempfile) ?
(ha_rows) (my_b_tell(tempfile)/param->rec_length) : idx;
+ DBUG_PRINT("info", ("find_all_keys return %u", (uint) retval));
DBUG_RETURN(retval);
} /* find_all_keys */
@@ -712,21 +792,19 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
*/
static int
-write_keys(SORTPARAM *param, register uchar **sort_keys, uint count,
+write_keys(Sort_param *param, Filesort_info *fs_info, uint count,
IO_CACHE *buffpek_pointers, IO_CACHE *tempfile)
{
- size_t sort_length, rec_length;
+ size_t rec_length;
uchar **end;
BUFFPEK buffpek;
DBUG_ENTER("write_keys");
- sort_length= param->sort_length;
rec_length= param->rec_length;
-#ifdef MC68000
- quicksort(sort_keys,count,sort_length);
-#else
- my_string_ptr_sort((uchar*) sort_keys, (uint) count, sort_length);
-#endif
+ uchar **sort_keys= fs_info->get_sort_keys();
+
+ fs_info->sort_buffer(param, count);
+
if (!my_b_inited(tempfile) &&
open_cached_file(tempfile, mysql_tmpdir, TEMP_PREFIX, DISK_BUFFER_SIZE,
MYF(MY_WME)))
@@ -775,8 +853,8 @@ static inline void store_length(uchar *to, uint length, uint pack_length)
/** Make a sort-key from record. */
-static void make_sortkey(register SORTPARAM *param,
- register uchar *to, uchar *ref_pos)
+static void make_sortkey(register Sort_param *param,
+ register uchar *to, uchar *ref_pos)
{
reg3 Field *field;
reg1 SORT_FIELD *sort_field;
@@ -800,7 +878,7 @@ static void make_sortkey(register SORTPARAM *param,
switch (sort_field->result_type) {
case STRING_RESULT:
{
- CHARSET_INFO *cs=item->collation.collation;
+ const CHARSET_INFO *cs=item->collation.collation;
char fill_char= ((cs->state & MY_CS_BINSORT) ? (char) 0 : ' ');
int diff;
uint sort_field_length;
@@ -813,7 +891,7 @@ static void make_sortkey(register SORTPARAM *param,
if (!res)
{
if (maybe_null)
- bzero((char*) to-1,sort_field->length+1);
+ memset(to-1, 0, sort_field->length+1);
else
{
/* purecov: begin deadcode */
@@ -825,7 +903,7 @@ static void make_sortkey(register SORTPARAM *param,
DBUG_ASSERT(0);
DBUG_PRINT("warning",
("Got null on something that shouldn't be null"));
- bzero((char*) to,sort_field->length); // Avoid crash
+ memset(to, 0, sort_field->length); // Avoid crash
/* purecov: end */
}
break;
@@ -881,12 +959,19 @@ static void make_sortkey(register SORTPARAM *param,
}
if (maybe_null)
{
+ *to++=1; /* purecov: inspected */
if (item->null_value)
{
- bzero((char*) to++, sort_field->length+1);
+ if (maybe_null)
+ memset(to-1, 0, sort_field->length+1);
+ else
+ {
+ DBUG_PRINT("warning",
+ ("Got null on something that shouldn't be null"));
+ memset(to, 0, sort_field->length);
+ }
break;
}
- *to++=1; /* purecov: inspected */
}
to[7]= (uchar) value;
to[6]= (uchar) (value >> 8);
@@ -908,7 +993,8 @@ static void make_sortkey(register SORTPARAM *param,
{
if (item->null_value)
{
- bzero((char*) to++, sort_field->length+1);
+ memset(to, 0, sort_field->length+1);
+ to++;
break;
}
*to++=1;
@@ -925,7 +1011,7 @@ static void make_sortkey(register SORTPARAM *param,
{
if (item->null_value)
{
- bzero((char*) to,sort_field->length+1);
+ memset(to, 0, sort_field->length+1);
to++;
break;
}
@@ -970,7 +1056,7 @@ static void make_sortkey(register SORTPARAM *param,
SORT_ADDON_FIELD *addonf= param->addon_field;
uchar *nulls= to;
DBUG_ASSERT(addonf != 0);
- bzero((char *) nulls, addonf->offset);
+ memset(nulls, 0, addonf->offset);
to+= addonf->offset;
for ( ; (field= addonf->field) ; addonf++)
{
@@ -1009,7 +1095,7 @@ static void make_sortkey(register SORTPARAM *param,
Register fields used by sorting in the sorted table's read set
*/
-static void register_used_fields(SORTPARAM *param)
+static void register_used_fields(Sort_param *param)
{
reg1 SORT_FIELD *sort_field;
TABLE *table=param->sort_form;
@@ -1054,21 +1140,20 @@ static void register_used_fields(SORTPARAM *param)
}
-static bool save_index(SORTPARAM *param, uchar **sort_keys, uint count,
- FILESORT_INFO *table_sort)
+static bool save_index(Sort_param *param, uint count, Filesort_info *table_sort)
{
uint offset,res_length;
uchar *to;
DBUG_ENTER("save_index");
- my_string_ptr_sort((uchar*) sort_keys, (uint) count, param->sort_length);
+ table_sort->sort_buffer(param, count);
res_length= param->res_length;
offset= param->rec_length-res_length;
- if ((ha_rows) count > param->max_rows)
- count=(uint) param->max_rows;
if (!(to= table_sort->record_pointers=
- (uchar*) my_malloc(res_length*count, MYF(MY_WME))))
+ (uchar*) my_malloc(res_length*count,
+ MYF(MY_WME | MY_THREAD_SPECIFIC))))
DBUG_RETURN(1); /* purecov: inspected */
+ uchar **sort_keys= table_sort->get_sort_keys();
for (uchar **end= sort_keys+count ; sort_keys != end ; sort_keys++)
{
memcpy(to, *sort_keys+offset, res_length);
@@ -1078,10 +1163,150 @@ static bool save_index(SORTPARAM *param, uchar **sort_keys, uint count,
}
+/**
+ Test whether a priority queue is worth using to get the top elements of an
+ ordered result set. If it is, allocates a buffer for the required number of
+ records.
+
+ @param param Sort parameters.
+ @param filesort_info Filesort information.
+ @param table Table to sort.
+ @param num_rows Estimate of number of rows in source record set.
+ @param memory_available Memory available for sorting.
+
+ DESCRIPTION
+ Given a query like this:
+ SELECT ... FROM t ORDER BY a1,...,an LIMIT max_rows;
+ This function tests whether a priority queue should be used to keep
+ the result. Necessary conditions are:
+ - estimate that it is actually cheaper than merge-sort
+ - enough memory to store the <max_rows> records.
+
+ If we don't have space for <max_rows> records, but we *do* have
+ space for <max_rows> keys, we may rewrite 'table' to sort with
+ references to records instead of additional data.
+ (again, based on estimates that it will actually be cheaper).
+
+ @retval
+ true - if it's ok to use PQ
+ false - PQ will be slower than merge-sort, or there is not enough memory.
+*/
+
+bool check_if_pq_applicable(Sort_param *param,
+ Filesort_info *filesort_info,
+ TABLE *table, ha_rows num_rows,
+ ulong memory_available)
+{
+ DBUG_ENTER("check_if_pq_applicable");
+
+ /*
+ How much slower Priority Queue sort is than qsort.
+ Measurements (see unit test) indicate that PQ is roughly 3 times slower.
+ */
+ const double PQ_slowness= 3.0;
+
+ if (param->max_rows == HA_POS_ERROR)
+ {
+ DBUG_PRINT("info", ("No LIMIT"));
+ DBUG_RETURN(false);
+ }
+
+ if (param->max_rows + 2 >= UINT_MAX)
+ {
+ DBUG_PRINT("info", ("Too large LIMIT"));
+ DBUG_RETURN(false);
+ }
+
+ ulong num_available_keys=
+ memory_available / (param->rec_length + sizeof(char*));
+ // We need 1 extra record in the buffer, when using PQ.
+ param->max_keys_per_buffer= (uint) param->max_rows + 1;
+
+ if (num_rows < num_available_keys)
+ {
+ // The whole source set fits into memory.
+ if (param->max_rows < num_rows/PQ_slowness )
+ {
+ filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
+ param->rec_length);
+ DBUG_RETURN(filesort_info->get_sort_keys() != NULL);
+ }
+ else
+ {
+ // PQ will be slower.
+ DBUG_RETURN(false);
+ }
+ }
+
+ // Do we have space for LIMIT rows in memory?
+ if (param->max_keys_per_buffer < num_available_keys)
+ {
+ filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
+ param->rec_length);
+ DBUG_RETURN(filesort_info->get_sort_keys() != NULL);
+ }
+
+ // Try to strip off addon fields.
+ if (param->addon_field)
+ {
+ const ulong row_length=
+ param->sort_length + param->ref_length + sizeof(char*);
+ num_available_keys= memory_available / row_length;
+
+ // Can we fit all the keys in memory?
+ if (param->max_keys_per_buffer < num_available_keys)
+ {
+ const double sort_merge_cost=
+ get_merge_many_buffs_cost_fast(num_rows,
+ num_available_keys,
+ row_length);
+ /*
+ PQ has cost:
+ (insert + qsort) * log(queue size) / TIME_FOR_COMPARE_ROWID +
+ cost of file lookup afterwards.
+ The lookup cost is a bit pessimistic: we take scan_time and assume
+ that on average we find the row after scanning half of the file.
+ A better estimate would be lookup cost, but note that we are doing
+ random lookups here, rather than sequential scan.
+ */
+ const double pq_cpu_cost=
+ (PQ_slowness * num_rows + param->max_keys_per_buffer) *
+ log((double) param->max_keys_per_buffer) / TIME_FOR_COMPARE_ROWID;
+ const double pq_io_cost=
+ param->max_rows * table->file->scan_time() / 2.0;
+ const double pq_cost= pq_cpu_cost + pq_io_cost;
+
+ if (sort_merge_cost < pq_cost)
+ DBUG_RETURN(false);
+
+ filesort_info->alloc_sort_buffer(param->max_keys_per_buffer,
+ param->sort_length + param->ref_length);
+ if (filesort_info->get_sort_keys())
+ {
+ // Make the attached data references instead of fields.
+ my_free(filesort_info->addon_buf);
+ my_free(filesort_info->addon_field);
+ filesort_info->addon_buf= NULL;
+ filesort_info->addon_field= NULL;
+ param->addon_field= NULL;
+ param->addon_length= 0;
+
+ param->res_length= param->ref_length;
+ param->sort_length+= param->ref_length;
+ param->rec_length= param->sort_length;
+
+ DBUG_RETURN(true);
+ }
+ }
+ }
+ DBUG_RETURN(false);
+}
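Restating the cost model coded above in formula form (N = num_rows,
L = max_keys_per_buffer = LIMIT + 1, PQ_slowness = 3, and log is the natural
logarithm, as in the C code):

\[
C_{\mathrm{PQ}} \;=\; \frac{(3N + L)\,\ln L}{\mathrm{TIME\_FOR\_COMPARE\_ROWID}}
\;+\; \frac{\mathit{max\_rows}\cdot \mathrm{scan\_time}()}{2}
\]

The PQ path is rejected whenever the simulated merge-sort cost from
get_merge_many_buffs_cost_fast() comes out lower.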
+
+
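Bounded_queue is the server's wrapper; the underlying top-N idea can be
illustrated stand-alone (plain C++ sketch with hypothetical names, using the
same max_at_top semantics as pq.init() above):

    #include <queue>
    #include <vector>
    #include <cstdint>

    // Keep the n smallest keys seen so far. The heap top is the current
    // worst candidate, so a new key replaces it only if it compares lower.
    std::vector<uint64_t> top_n(const std::vector<uint64_t> &keys, size_t n)
    {
      std::priority_queue<uint64_t> pq;                 // max-heap
      for (uint64_t k : keys)
      {
        if (pq.size() < n)
          pq.push(k);
        else if (k < pq.top())
        {
          pq.pop();
          pq.push(k);
        }
      }
      std::vector<uint64_t> out;
      while (!pq.empty()) { out.push_back(pq.top()); pq.pop(); }
      return out;                                       // worst-to-best order
    }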
/** Merge buffers to make < MERGEBUFF2 buffers. */
-int merge_many_buff(SORTPARAM *param, uchar *sort_buffer,
- BUFFPEK *buffpek, uint *maxbuffer, IO_CACHE *t_file)
+int merge_many_buff(Sort_param *param, uchar *sort_buffer,
+ BUFFPEK *buffpek, uint *maxbuffer, IO_CACHE *t_file)
{
register uint i;
IO_CACHE t_file2,*from_file,*to_file,*temp;
@@ -1212,7 +1437,7 @@ void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length)
other error
*/
-int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
+int merge_buffers(Sort_param *param, IO_CACHE *from_file,
IO_CACHE *to_file, uchar *sort_buffer,
BUFFPEK *lastbuff, BUFFPEK *Fb, BUFFPEK *Tb,
int flag)
@@ -1230,18 +1455,13 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
void *first_cmp_arg;
element_count dupl_count= 0;
uchar *src;
- killed_state not_killable;
uchar *unique_buff= param->unique_buff;
- volatile killed_state *killed= &current_thd->killed;
+ const bool killable= !param->not_killable;
+ THD* const thd=current_thd;
DBUG_ENTER("merge_buffers");
- status_var_increment(current_thd->status_var.filesort_merge_passes);
- current_thd->query_plan_fsort_passes++;
- if (param->not_killable)
- {
- killed= &not_killable;
- not_killable= NOT_KILLED;
- }
+ status_var_increment(thd->status_var.filesort_merge_passes);
+ thd->query_plan_fsort_passes++;
error=0;
rec_length= param->rec_length;
@@ -1254,7 +1474,7 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
(flag && min_dupl_count ? sizeof(dupl_count) : 0)-res_length);
uint wr_len= flag ? res_length : rec_length;
uint wr_offset= flag ? offset : 0;
- maxcount= (ulong) (param->keys/((uint) (Tb-Fb) +1));
+ maxcount= (ulong) (param->max_keys_per_buffer/((uint) (Tb-Fb) +1));
to_start_filepos= my_b_tell(to_file);
strpos= sort_buffer;
org_max_rows=max_rows= param->max_rows;
@@ -1318,7 +1538,7 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
while (queue.elements > 1)
{
- if (*killed)
+ if (killable && thd->check_killed())
{
error= 1; goto err; /* purecov: inspected */
}
@@ -1394,7 +1614,7 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file,
}
buffpek= (BUFFPEK*) queue_top(&queue);
buffpek->base= (uchar*) sort_buffer;
- buffpek->max_keys= param->keys;
+ buffpek->max_keys= param->max_keys_per_buffer;
/*
As we know all entries in the buffer are unique, we only have to
@@ -1484,9 +1704,9 @@ err:
/* Do a merge to output-file (save only positions) */
-int merge_index(SORTPARAM *param, uchar *sort_buffer,
- BUFFPEK *buffpek, uint maxbuffer,
- IO_CACHE *tempfile, IO_CACHE *outfile)
+int merge_index(Sort_param *param, uchar *sort_buffer,
+ BUFFPEK *buffpek, uint maxbuffer,
+ IO_CACHE *tempfile, IO_CACHE *outfile)
{
DBUG_ENTER("merge_index");
if (merge_buffers(param,tempfile,outfile,sort_buffer,buffpek,buffpek,
@@ -1532,7 +1752,7 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
bool *multi_byte_charset)
{
reg2 uint length;
- CHARSET_INFO *cs;
+ const CHARSET_INFO *cs;
*multi_byte_charset= 0;
length=0;
@@ -1633,7 +1853,8 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
*/
static SORT_ADDON_FIELD *
-get_addon_fields(THD *thd, Field **ptabfield, uint sortlength, uint *plength)
+get_addon_fields(ulong max_length_for_sort_data,
+ Field **ptabfield, uint sortlength, uint *plength)
{
Field **pfield;
Field *field;
@@ -1670,9 +1891,11 @@ get_addon_fields(THD *thd, Field **ptabfield, uint sortlength, uint *plength)
return 0;
length+= (null_fields+7)/8;
- if (length+sortlength > thd->variables.max_length_for_sort_data ||
+ if (length+sortlength > max_length_for_sort_data ||
!(addonf= (SORT_ADDON_FIELD *) my_malloc(sizeof(SORT_ADDON_FIELD)*
- (fields+1), MYF(MY_WME))))
+ (fields+1),
+ MYF(MY_WME |
+ MY_THREAD_SPECIFIC))))
return 0;
*plength= length;
@@ -1753,7 +1976,7 @@ void change_double_for_sort(double nr,uchar *to)
if (nr == 0.0)
{ /* Change to zero string */
tmp[0]=(uchar) 128;
- bzero((char*) tmp+1,sizeof(nr)-1);
+ memset(tmp+1, 0, sizeof(nr)-1);
}
else
{
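For context on what change_double_for_sort() is doing (the hunk only touches
its zero case): the standard trick for making IEEE-754 doubles byte-comparable
is to flip the sign bit of non-negative values and invert all bits of negative
ones. A stand-alone sketch of the idea, not the server's exact byte layout:

    #include <cstdint>
    #include <cstring>

    // Map a double to a 64-bit integer whose unsigned order matches the
    // numeric order of the doubles (NaN excluded). Store the result
    // big-endian if the bytes are to be compared with memcmp().
    uint64_t sortable_bits(double d)
    {
      uint64_t u;
      memcpy(&u, &d, sizeof(u));
      // Negatives: invert all bits (reverses their order and places them
      // below positives). Non-negatives: just set the sign bit.
      return (u & (1ULL << 63)) ? ~u : (u | (1ULL << 63));
    }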
diff --git a/sql/filesort.h b/sql/filesort.h
index 8ee8999d055..8960fa6cb66 100644
--- a/sql/filesort.h
+++ b/sql/filesort.h
@@ -29,10 +29,8 @@ typedef struct st_sort_field SORT_FIELD;
ha_rows filesort(THD *thd, TABLE *table, st_sort_field *sortorder,
uint s_length, SQL_SELECT *select,
ha_rows max_rows, bool sort_positions,
- ha_rows *examined_rows);
+ ha_rows *examined_rows, ha_rows *found_rows);
void filesort_free_buffers(TABLE *table, bool full);
-double get_merge_many_buffs_cost(uint *buffer, uint last_n_elems,
- int elem_size);
void change_double_for_sort(double nr,uchar *to);
#endif /* FILESORT_INCLUDED */
diff --git a/sql/filesort_utils.cc b/sql/filesort_utils.cc
new file mode 100644
index 00000000000..f8f6d5c9420
--- /dev/null
+++ b/sql/filesort_utils.cc
@@ -0,0 +1,143 @@
+/* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "filesort_utils.h"
+#include "sql_const.h"
+#include "sql_sort.h"
+#include "table.h"
+#include "my_sys.h"
+
+
+namespace {
+/**
+ A local helper function. See comments for get_merge_buffers_cost().
+ */
+double get_merge_cost(ha_rows num_elements, ha_rows num_buffers, uint elem_size)
+{
+ return
+ 2.0 * ((double) num_elements * elem_size) / IO_SIZE
+ + (double) num_elements * log((double) num_buffers) /
+ (TIME_FOR_COMPARE_ROWID * M_LN2);
+}
+}
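In formula form, the helper above charges each merge pass as follows, for N
elements of size s merged from B buffers (note that log(B)/M_LN2 is log2(B)):

\[
C_{\mathrm{merge}}(N, B, s) \;=\; \frac{2\,N\,s}{\mathrm{IO\_SIZE}}
\;+\; \frac{N\,\log_2 B}{\mathrm{TIME\_FOR\_COMPARE\_ROWID}}
\]

The first term models reading and writing every element once; the second, the
key comparisons of the B-way merge.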
+
+/**
+ This is a simplified and faster version of @see get_merge_many_buffs_cost().
+ We calculate the cost of merging buffers by simulating the actions
+ of @see merge_many_buff. For explanations of the formulas below,
+ see comments for get_merge_buffers_cost().
+ TODO: Use this function for Unique::get_use_cost().
+*/
+double get_merge_many_buffs_cost_fast(ha_rows num_rows,
+ ha_rows num_keys_per_buffer,
+ uint elem_size)
+{
+ ha_rows num_buffers= num_rows / num_keys_per_buffer;
+ ha_rows last_n_elems= num_rows % num_keys_per_buffer;
+ double total_cost;
+
+ // Calculate CPU cost of sorting buffers.
+ total_cost=
+ ( num_buffers * num_keys_per_buffer * log(1.0 + num_keys_per_buffer) +
+ last_n_elems * log(1.0 + last_n_elems) )
+ / TIME_FOR_COMPARE_ROWID;
+
+ // Simulate behavior of merge_many_buff().
+ while (num_buffers >= MERGEBUFF2)
+ {
+ // Calculate # of calls to merge_buffers().
+ const ha_rows loop_limit= num_buffers - MERGEBUFF*3/2;
+ const ha_rows num_merge_calls= 1 + loop_limit/MERGEBUFF;
+ const ha_rows num_remaining_buffs=
+ num_buffers - num_merge_calls * MERGEBUFF;
+
+ // Cost of the 'num_merge_calls' merge passes.
+ total_cost+=
+ num_merge_calls *
+ get_merge_cost(num_keys_per_buffer * MERGEBUFF, MERGEBUFF, elem_size);
+
+ // # of records in remaining buffers.
+ last_n_elems+= num_remaining_buffs * num_keys_per_buffer;
+
+ // Cost of merge sort of remaining buffers.
+ total_cost+=
+ get_merge_cost(last_n_elems, 1 + num_remaining_buffs, elem_size);
+
+ num_buffers= num_merge_calls;
+ num_keys_per_buffer*= MERGEBUFF;
+ }
+
+ // Simulate final merge_buff call.
+ last_n_elems+= num_keys_per_buffer * num_buffers;
+ total_cost+= get_merge_cost(last_n_elems, 1 + num_buffers, elem_size);
+ return total_cost;
+}
+
+uchar **Filesort_buffer::alloc_sort_buffer(uint num_records, uint record_length)
+{
+ ulong sort_buff_sz;
+
+ DBUG_ENTER("alloc_sort_buffer");
+
+ DBUG_EXECUTE_IF("alloc_sort_buffer_fail",
+ DBUG_SET("+d,simulate_out_of_memory"););
+
+ if (m_idx_array.is_null())
+ {
+ sort_buff_sz= num_records * (record_length + sizeof(uchar*));
+ set_if_bigger(sort_buff_sz, record_length * MERGEBUFF2);
+ uchar **sort_keys=
+ (uchar**) my_malloc(sort_buff_sz, MYF(MY_THREAD_SPECIFIC));
+ m_idx_array= Idx_array(sort_keys, num_records);
+ m_record_length= record_length;
+ uchar **start_of_data= m_idx_array.array() + m_idx_array.size();
+ m_start_of_data= reinterpret_cast<uchar*>(start_of_data);
+ }
+ else
+ {
+ DBUG_ASSERT(num_records == m_idx_array.size());
+ DBUG_ASSERT(record_length == m_record_length);
+ }
+ DBUG_RETURN(m_idx_array.array());
+}
+
+
+void Filesort_buffer::free_sort_buffer()
+{
+ my_free(m_idx_array.array());
+ m_idx_array= Idx_array();
+ m_record_length= 0;
+ m_start_of_data= NULL;
+}
+
+
+void Filesort_buffer::sort_buffer(const Sort_param *param, uint count)
+{
+ if (count <= 1)
+ return;
+ uchar **keys= get_sort_keys();
+ uchar **buffer= NULL;
+ if (radixsort_is_appliccable(count, param->sort_length) &&
+ (buffer= (uchar**) my_malloc(count*sizeof(char*),
+ MYF(MY_THREAD_SPECIFIC))))
+ {
+ radixsort_for_str_ptr(keys, count, param->sort_length, buffer);
+ my_free(buffer);
+ return;
+ }
+
+ size_t size= param->sort_length;
+ my_qsort2(keys, count, sizeof(uchar*), get_ptr_compare(size), &size);
+}
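The dispatch above prefers radix sort when radixsort_is_appliccable() says the
key shape allows it (and a temporary pointer array can be allocated), and
otherwise falls back to a bounded-length qsort. The fallback's ordering
criterion can be sketched stand-alone (illustrative only; the server uses
get_ptr_compare() with my_qsort2()):

    #include <algorithm>
    #include <cstring>
    #include <vector>

    // Sort pointers to fixed-size binary sort keys by comparing the first
    // sort_length bytes of each key, as the qsort fallback does.
    void sort_key_pointers(std::vector<unsigned char*> &keys,
                           size_t sort_length)
    {
      std::sort(keys.begin(), keys.end(),
                [sort_length](const unsigned char *a, const unsigned char *b)
                { return memcmp(a, b, sort_length) < 0; });
    }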
diff --git a/sql/filesort_utils.h b/sql/filesort_utils.h
new file mode 100644
index 00000000000..4cccf8ffa02
--- /dev/null
+++ b/sql/filesort_utils.h
@@ -0,0 +1,129 @@
+/* Copyright (c) 2010, 2012 Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#ifndef FILESORT_UTILS_INCLUDED
+#define FILESORT_UTILS_INCLUDED
+
+#include "my_global.h"
+#include "my_base.h"
+#include "sql_array.h"
+
+class Sort_param;
+/*
+ Calculate cost of merge sort
+
+ @param num_rows Total number of rows.
+ @param num_keys_per_buffer Number of keys per buffer.
+ @param elem_size Size of each element.
+
+ Calculates cost of merge sort by simulating call to merge_many_buff().
+
+ @retval
+ Computed cost of merge sort in disk seeks.
+
+ @note
+ Declared here in order to be able to unit test it,
+ since library dependencies have not been sorted out yet.
+
+ See also comments get_merge_many_buffs_cost().
+*/
+
+double get_merge_many_buffs_cost_fast(ha_rows num_rows,
+ ha_rows num_keys_per_buffer,
+ uint elem_size);
+
+
+/**
+ A wrapper class around the buffer used by filesort().
+ The buffer is a contiguous chunk of memory,
+ where the first part is <num_records> pointers to the actual data.
+
+ We wrap the buffer in order to be able to do lazy initialization of the
+ pointers: the buffer is often much larger than what we actually need.
+
+ The buffer must be kept available for multiple executions of the
+ same sort operation, so we have explicit allocate and free functions,
+ rather than doing alloc/free in CTOR/DTOR.
+*/
+class Filesort_buffer
+{
+public:
+ Filesort_buffer() :
+ m_idx_array(), m_record_length(0), m_start_of_data(NULL)
+ {}
+
+ /** Sort me... */
+ void sort_buffer(const Sort_param *param, uint count);
+
+ /// Initializes a record pointer.
+ uchar *get_record_buffer(uint idx)
+ {
+ m_idx_array[idx]= m_start_of_data + (idx * m_record_length);
+ return m_idx_array[idx];
+ }
+
+ /// Initializes all the record pointers.
+ void init_record_pointers()
+ {
+ for (uint ix= 0; ix < m_idx_array.size(); ++ix)
+ (void) get_record_buffer(ix);
+ }
+
+ /// Returns total size: pointer array + record buffers.
+ size_t sort_buffer_size() const
+ {
+ return m_idx_array.size() * (m_record_length + sizeof(uchar*));
+ }
+
+ /// Allocates the buffer, but does *not* initialize pointers.
+ uchar **alloc_sort_buffer(uint num_records, uint record_length);
+
+
+ /// Check <num_records, record_length> for the buffer
+ bool check_sort_buffer_properties(uint num_records, uint record_length)
+ {
+ return (static_cast<uint>(m_idx_array.size()) == num_records &&
+ m_record_length == record_length);
+ }
+
+ /// Frees the buffer.
+ void free_sort_buffer();
+
+ /// Getter, for calling routines which still use the uchar** interface.
+ uchar **get_sort_keys() { return m_idx_array.array(); }
+
+ /**
+ We need an assignment operator, see filesort().
+ This happens to have the same semantics as the one that would be
+ generated by the compiler. We still implement it here, to show shallow
+ assignment explicitly: we have two objects sharing the same array.
+ */
+ Filesort_buffer &operator=(const Filesort_buffer &rhs)
+ {
+ m_idx_array= rhs.m_idx_array;
+ m_record_length= rhs.m_record_length;
+ m_start_of_data= rhs.m_start_of_data;
+ return *this;
+ }
+
+private:
+ typedef Bounds_checked_array<uchar*> Idx_array;
+
+ Idx_array m_idx_array;
+ uint m_record_length;
+ uchar *m_start_of_data;
+};
+
+#endif // FILESORT_UTILS_INCLUDED
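To make the contiguous layout concrete, here is a toy stand-alone version of
the pointer-array-plus-records scheme (hypothetical names; the real class sits
on Bounds_checked_array and the server allocator):

    #include <cstdlib>

    // One allocation holds [ptr0 .. ptrN-1][rec0 .. recN-1], as in
    // alloc_sort_buffer(); the loop mirrors init_record_pointers().
    struct ToyBuffer
    {
      unsigned char **idx;          // first part: N pointers
      unsigned char *data;          // second part: the records themselves
      size_t num_records, record_length;
    };

    bool toy_alloc(ToyBuffer *b, size_t num_records, size_t record_length)
    {
      size_t sz= num_records * (record_length + sizeof(unsigned char*));
      void *mem= malloc(sz);
      if (!mem)
        return false;
      b->idx= static_cast<unsigned char**>(mem);
      b->data= reinterpret_cast<unsigned char*>(b->idx + num_records);
      b->num_records= num_records;
      b->record_length= record_length;
      for (size_t i= 0; i < num_records; i++)
        b->idx[i]= b->data + i * record_length;
      return true;
    }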
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 2878f25ed14..6fc30fa4fa0 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -2906,8 +2906,6 @@ int ha_ndbcluster::write_row(uchar *record)
}
ha_statistic_increment(&SSV::ha_write_count);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
if (!(op= trans->getNdbOperation(m_table)))
ERR_RETURN(trans->getNdbError());
@@ -3146,11 +3144,6 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data)
}
ha_statistic_increment(&SSV::ha_update_count);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- {
- table->timestamp_field->set_time();
- bitmap_set_bit(table->write_set, table->timestamp_field->field_index);
- }
if (m_use_partition_function &&
(error= get_parts_for_update(old_data, new_data, table->record[0],
@@ -8656,7 +8649,7 @@ NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table,
MEM_ROOT **root_ptr=
my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC);
MEM_ROOT *old_root= *root_ptr;
- init_sql_alloc(&share->mem_root, 1024, 0);
+ init_sql_alloc(&share->mem_root, 1024, 0, MYF(0));
*root_ptr= &share->mem_root; // remember to reset before return
share->state= NSS_INITIAL;
/* enough space for key, db, and table_name */
@@ -9500,7 +9493,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
thd->init_for_queries();
thd->main_security_ctx.host_or_ip= "";
thd->client_capabilities = 0;
- my_net_init(&thd->net, 0);
+ my_net_init(&thd->net, 0, MYF(MY_THREAD_SPECIFIC));
thd->main_security_ctx.master_access= ~0;
thd->main_security_ctx.priv_user[0] = 0;
/* Do not use user-supplied timeout value for system threads. */
@@ -9737,11 +9730,9 @@ next:
mysql_mutex_lock(&LOCK_ndb_util_thread);
ndb_util_thread_end:
- net_end(&thd->net);
ndb_util_thread_fail:
if (share_list)
delete [] share_list;
- thd->cleanup();
delete thd;
/* signal termination */
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index 856781db28a..1544678de38 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -3666,7 +3666,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
thd->system_thread= SYSTEM_THREAD_NDBCLUSTER_BINLOG;
thd->main_security_ctx.host_or_ip= "";
thd->client_capabilities= 0;
- my_net_init(&thd->net, 0);
+ my_net_init(&thd->net, 0, MYF(MY_THREAD_SPECIFIC));
thd->main_security_ctx.master_access= ~0;
thd->main_security_ctx.priv_user[0]= 0;
/* Do not use user-supplied timeout value for system threads. */
@@ -3965,7 +3965,7 @@ restart:
my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC);
MEM_ROOT *old_root= *root_ptr;
MEM_ROOT mem_root;
- init_sql_alloc(&mem_root, 4096, 0);
+ init_sql_alloc(&mem_root, 4096, 0, MYF(0));
List<Cluster_schema> post_epoch_log_list;
List<Cluster_schema> post_epoch_unlock_list;
*root_ptr= &mem_root;
@@ -4365,8 +4365,6 @@ err:
my_hash_free(&ndb_schema_objects);
- net_end(&thd->net);
- thd->cleanup();
delete thd;
ndb_binlog_thread_running= -1;
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 4cf66daf315..ce3f17aeb92 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -168,7 +168,7 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share)
:handler(hton, share)
{
DBUG_ENTER("ha_partition::ha_partition(table)");
- init_alloc_root(&m_mem_root, 512, 512);
+ init_alloc_root(&m_mem_root, 512, 512, MYF(0));
init_handler_variables();
DBUG_VOID_RETURN;
}
@@ -190,7 +190,7 @@ ha_partition::ha_partition(handlerton *hton, partition_info *part_info)
{
DBUG_ENTER("ha_partition::ha_partition(part_info)");
DBUG_ASSERT(part_info);
- init_alloc_root(&m_mem_root, 512, 512);
+ init_alloc_root(&m_mem_root, 512, 512, MYF(0));
init_handler_variables();
m_part_info= part_info;
m_create_handler= TRUE;
@@ -217,7 +217,7 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share,
:handler(hton, share)
{
DBUG_ENTER("ha_partition::ha_partition(clone)");
- init_alloc_root(&m_mem_root, 512, 512);
+ init_alloc_root(&m_mem_root, 512, 512, MYF(0));
init_handler_variables();
m_part_info= part_info_arg;
m_create_handler= TRUE;
@@ -298,13 +298,6 @@ void ha_partition::init_handler_variables()
}
-const char *ha_partition::table_type() const
-{
- // we can do this since we only support a single engine type
- return m_file[0]->table_type();
-}
-
-
/*
Destructor method
@@ -3430,8 +3423,8 @@ void ha_partition::try_semi_consistent_read(bool yes)
ADDITIONAL INFO:
- We have to set timestamp fields and auto_increment fields, because those
- may be used in determining which partition the row should be written to.
+ We have to set auto_increment fields, because those may be used in
+ determining which partition the row should be written to.
*/
int ha_partition::write_row(uchar * buf)
@@ -3442,7 +3435,6 @@ int ha_partition::write_row(uchar * buf)
bool have_auto_increment= table->next_number_field && buf == table->record[0];
my_bitmap_map *old_map;
THD *thd= ha_thd();
- timestamp_auto_set_type saved_timestamp_type= table->timestamp_field_type;
ulonglong saved_sql_mode= thd->variables.sql_mode;
bool saved_auto_inc_field_not_null= table->auto_increment_field_not_null;
#ifdef NOT_NEEDED
@@ -3451,11 +3443,6 @@ int ha_partition::write_row(uchar * buf)
DBUG_ENTER("ha_partition::write_row");
DBUG_ASSERT(buf == m_rec0);
- /* If we have a timestamp column, update it to the current time */
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
- table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
-
/*
If we have an auto_increment column and we are writing a changed row
or a new row, then update the auto_increment value in the record.
@@ -3531,7 +3518,6 @@ int ha_partition::write_row(uchar * buf)
exit:
thd->variables.sql_mode= saved_sql_mode;
table->auto_increment_field_not_null= saved_auto_inc_field_not_null;
- table->timestamp_field_type= saved_timestamp_type;
DBUG_RETURN(error);
}
@@ -3566,18 +3552,8 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
uint32 new_part_id, old_part_id;
int error= 0;
longlong func_value;
- timestamp_auto_set_type orig_timestamp_type= table->timestamp_field_type;
DBUG_ENTER("ha_partition::update_row");
- /*
- We need to set timestamp field once before we calculate
- the partition. Then we disable timestamp calculations
- inside m_file[*]->update_row() methods
- */
- if (orig_timestamp_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- table->timestamp_field->set_time();
- table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
-
if ((error= get_parts_for_update(old_data, new_data, table->record[0],
m_part_info, &old_part_id, &new_part_id,
&func_value)))
@@ -3651,7 +3627,6 @@ exit:
info(HA_STATUS_AUTO);
set_auto_increment_if_higher(table->found_next_number_field);
}
- table->timestamp_field_type= orig_timestamp_type;
DBUG_RETURN(error);
}
@@ -3847,6 +3822,7 @@ int ha_partition::truncate_partition(Alter_info *alter_info, bool *binlog_stmt)
SYNOPSIS
start_bulk_insert()
rows Number of rows to insert
+ flags Flags to control index creation
RETURN VALUE
NONE
@@ -3854,7 +3830,7 @@ int ha_partition::truncate_partition(Alter_info *alter_info, bool *binlog_stmt)
DESCRIPTION
rows == 0 means we will probably insert many rows
*/
-void ha_partition::start_bulk_insert(ha_rows rows)
+void ha_partition::start_bulk_insert(ha_rows rows, uint flags)
{
DBUG_ENTER("ha_partition::start_bulk_insert");
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 30262b25bd9..53f0c4f2484 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -375,7 +375,7 @@ public:
virtual int delete_row(const uchar * buf);
virtual int delete_all_rows(void);
virtual int truncate();
- virtual void start_bulk_insert(ha_rows rows);
+ virtual void start_bulk_insert(ha_rows rows, uint flags);
virtual int end_bulk_insert();
private:
ha_rows guess_bulk_insert_rows();
@@ -639,9 +639,6 @@ public:
*/
virtual const char *index_type(uint inx);
- /* The name of the table type that will be used for display purposes */
- virtual const char *table_type() const;
-
/* The name of the row type used for the underlying tables. */
virtual enum row_type get_row_type() const;
diff --git a/sql/handler.cc b/sql/handler.cc
index 058e219f76c..150bbee39a9 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -644,6 +644,43 @@ void ha_checkpoint_state(bool disable)
}
+struct st_commit_checkpoint_request {
+ void *cookie;
+ void (*pre_hook)(void *);
+};
+
+static my_bool commit_checkpoint_request_handlerton(THD *unused1, plugin_ref plugin,
+ void *data)
+{
+ st_commit_checkpoint_request *st= (st_commit_checkpoint_request *)data;
+ handlerton *hton= plugin_data(plugin, handlerton *);
+ if (hton->state == SHOW_OPTION_YES && hton->commit_checkpoint_request)
+ {
+ void *cookie= st->cookie;
+ if (st->pre_hook)
+ (*st->pre_hook)(cookie);
+ (*hton->commit_checkpoint_request)(hton, cookie);
+ }
+ return FALSE;
+}
+
+
+/*
+ Invoke commit_checkpoint_request() in all storage engines that implement it.
+
+ If pre_hook is non-NULL, the hook will be called prior to each invocation.
+*/
+void
+ha_commit_checkpoint_request(void *cookie, void (*pre_hook)(void *))
+{
+ st_commit_checkpoint_request st;
+ st.cookie= cookie;
+ st.pre_hook= pre_hook;
+ plugin_foreach(NULL, commit_checkpoint_request_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &st);
+}
+
+
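For the engine side of this contract, see the long comment added to struct
handlerton in sql/handler.h below; a minimal sketch of an implementation (the
flush helper is hypothetical, the two server calls are the ones introduced in
this patch):

    // Hypothetical engine method: make every transaction that has finished
    // commit_ordered() durable, then return the cookie to the TC. The
    // notify call may equally well be deferred to a background thread.
    static void example_commit_checkpoint_request(handlerton *hton,
                                                  void *cookie)
    {
      example_flush_committed_transactions();   /* hypothetical helper */
      commit_checkpoint_notify_ha(hton, cookie);
    }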
static my_bool closecon_handlerton(THD *thd, plugin_ref plugin,
void *unused)
@@ -1299,11 +1336,13 @@ int ha_commit_trans(THD *thd, bool all)
goto done;
}
+ DEBUG_SYNC(thd, "ha_commit_trans_before_log_and_order");
cookie= tc_log->log_and_order(thd, xid, all, need_prepare_ordered,
need_commit_ordered);
if (!cookie)
goto err;
+ DEBUG_SYNC(thd, "ha_commit_trans_after_log_and_order");
DBUG_EXECUTE_IF("crash_commit_after_log", DBUG_SUICIDE(););
error= commit_one_phase_2(thd, all, trans, is_real_trans) ? 2 : 0;
@@ -1795,6 +1834,17 @@ bool mysql_xa_recover(THD *thd)
DBUG_RETURN(0);
}
+/*
+ Called by engine to notify TC that a new commit checkpoint has been reached.
+ See comments on handlerton method commit_checkpoint_request() for details.
+*/
+void
+commit_checkpoint_notify_ha(handlerton *hton, void *cookie)
+{
+ tc_log->commit_checkpoint_notify(cookie);
+}
+
+
/**
@details
This function should be called when MySQL sends rows of a SELECT result set
@@ -5350,7 +5400,7 @@ fl_log_iterator_buffer_init(struct handler_iterator *iterator)
/* to be able to make my_free without crash in case of error */
iterator->buffer= 0;
- if (!(dirp = my_dir(fl_dir, MYF(0))))
+ if (!(dirp = my_dir(fl_dir, MYF(MY_THREAD_SPECIFIC))))
{
return HA_ITERATOR_ERROR;
}
@@ -5359,7 +5409,7 @@ fl_log_iterator_buffer_init(struct handler_iterator *iterator)
sizeof(enum log_status) +
+ FN_REFLEN + 1) *
(uint) dirp->number_off_files),
- MYF(0))) == 0)
+ MYF(MY_THREAD_SPECIFIC))) == 0)
{
return HA_ITERATOR_ERROR;
}
@@ -5393,6 +5443,7 @@ fl_log_iterator_buffer_init(struct handler_iterator *iterator)
iterator->buffer= buff;
iterator->next= &fl_log_iterator_next;
iterator->destroy= &fl_log_iterator_destroy;
+ my_dirend(dirp);
return HA_ITERATOR_OK;
}
diff --git a/sql/handler.h b/sql/handler.h
index 4a91d989e52..c38dec198b9 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -600,6 +600,7 @@ enum enum_schema_tables
SCH_COLUMN_PRIVILEGES,
SCH_ENGINES,
SCH_EVENTS,
+ SCH_EXPLAIN,
SCH_FILES,
SCH_GLOBAL_STATUS,
SCH_GLOBAL_VARIABLES,
@@ -980,6 +981,46 @@ struct handlerton
int (*recover)(handlerton *hton, XID *xid_list, uint len);
int (*commit_by_xid)(handlerton *hton, XID *xid);
int (*rollback_by_xid)(handlerton *hton, XID *xid);
+ /*
+ The commit_checkpoint_request() handlerton method is used to checkpoint
+ the XA recovery process for storage engines that support two-phase
+ commit.
+
+ The method is optional - an engine that does not implement it is expected
+ to work the traditional way, where every commit() durably flushes the
+ transaction to disk in the engine before completion, so XA recovery will
+ no longer be needed for that transaction.
+
+ An engine that does implement commit_checkpoint_request() is also
+ expected to implement commit_ordered(), so that ordering of commits is
+ consistent between 2pc participants. Such an engine is no longer required
+ to durably flush transactions to disk in commit(), provided that the
+ transaction has been successfully prepare()d and commit_ordered(); thus
+ potentially saving one fsync() call. (The engine must still durably flush
+ to disk in commit() when no prepare()/commit_ordered() steps took place,
+ at least if durable commits are wanted; this happens e.g. if the binlog is
+ disabled).
+
+ The TC will periodically (e.g. once per binlog rotation) call
+ commit_checkpoint_request(). When this happens, the engine must arrange
+ for all transactions that have completed commit_ordered() to be durably
+ flushed to disk (this does not include transactions that might be in the
+ middle of executing commit_ordered()). When such a flush has completed, the
+ engine must call commit_checkpoint_notify_ha(), passing back the opaque
+ "cookie".
+
+ The flush and call of commit_checkpoint_notify_ha() need not happen
+ immediately - it can be scheduled and performed asynchronously (e.g. as
+ part of the next prepare(), or a sync every second, or whatever), but should
+ not be postponed indefinitely. It is however also permissible to do it
+ immediately, before returning from commit_checkpoint_request().
+
+ When commit_checkpoint_notify_ha() is called, the TC will know that the
+ transactions are durably committed, and thus no longer require XA
+ recovery. It uses that to reduce the work needed for any subsequent XA
+ recovery process.
+ */
+ void (*commit_checkpoint_request)(handlerton *hton, void *cookie);
/*
"Disable or enable checkpointing internal to the storage engine. This is
used for FLUSH TABLES WITH READ LOCK AND DISABLE CHECKPOINT to ensure that
@@ -1917,11 +1958,11 @@ public:
/** to be actually called to get 'check()' functionality*/
int ha_check(THD *thd, HA_CHECK_OPT *check_opt);
int ha_repair(THD* thd, HA_CHECK_OPT* check_opt);
- void ha_start_bulk_insert(ha_rows rows)
+ void ha_start_bulk_insert(ha_rows rows, uint flags= 0)
{
DBUG_ENTER("handler::ha_start_bulk_insert");
estimation_rows_to_insert= rows;
- start_bulk_insert(rows);
+ start_bulk_insert(rows, flags);
DBUG_VOID_RETURN;
}
int ha_end_bulk_insert()
@@ -2373,7 +2414,7 @@ public:
{ return; } /* prepare InnoDB for HANDLER */
virtual void free_foreign_key_create_info(char* str) {}
/** The following can be called without an open handler */
- virtual const char *table_type() const =0;
+ const char *table_type() const { return hton_name(ht)->str; }
/**
If frm_error() is called then we will use this to find out what file
extentions exist for the storage engine. This is also used by the default
@@ -2826,7 +2867,7 @@ private:
DBUG_ASSERT(!(ha_table_flags() & HA_CAN_REPAIR));
return HA_ADMIN_NOT_IMPLEMENTED;
}
- virtual void start_bulk_insert(ha_rows rows) {}
+ virtual void start_bulk_insert(ha_rows rows, uint flags) {}
virtual int end_bulk_insert() { return 0; }
virtual int index_read(uchar * buf, const uchar * key, uint key_len,
enum ha_rkey_function find_flag)
@@ -2985,6 +3026,7 @@ void ha_kill_query(THD* thd, enum thd_kill_levels level);
bool ha_flush_logs(handlerton *db_type);
void ha_drop_database(char* path);
void ha_checkpoint_state(bool disable);
+void ha_commit_checkpoint_request(void *cookie, void (*pre_hook)(void *));
int ha_create_table(THD *thd, const char *path,
const char *db, const char *table_name,
HA_CREATE_INFO *create_info,
@@ -3065,6 +3107,7 @@ int ha_binlog_end(THD *thd);
const char *get_canonical_filename(handler *file, const char *path,
char *tmp_path);
bool mysql_xa_recover(THD *thd);
+void commit_checkpoint_notify_ha(handlerton *hton, void *cookie);
inline const char *table_case_name(HA_CREATE_INFO *info, const char *name)
{
diff --git a/sql/item.cc b/sql/item.cc
index 2c435eb6a9f..96f61c08982 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -9663,14 +9663,3 @@ const char *dbug_print_item(Item *item)
#endif /*DBUG_OFF*/
-/*****************************************************************************
-** Instantiate templates
-*****************************************************************************/
-
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-template class List<Item>;
-template class List_iterator<Item>;
-template class List_iterator_fast<Item>;
-template class List_iterator_fast<Item_field>;
-template class List<List_item>;
-#endif
diff --git a/sql/item.h b/sql/item.h
index bdf6fbe548e..9b10cdf601b 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -523,7 +523,7 @@ public:
struct st_dyncall_create_def
{
- Item *num, *value;
+ Item *key, *value;
CHARSET_INFO *cs;
uint len, frac;
DYNAMIC_COLUMN_TYPE type;
diff --git a/sql/item_buff.cc b/sql/item_buff.cc
index 86e0fd32774..ce396736d6f 100644
--- a/sql/item_buff.cc
+++ b/sql/item_buff.cc
@@ -173,12 +173,3 @@ bool Cached_item_decimal::cmp()
return FALSE;
}
-
-/*****************************************************************************
-** Instansiate templates
-*****************************************************************************/
-
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-template class List<Cached_item>;
-template class List_iterator<Cached_item>;
-#endif
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 3e64e3969e4..3b09da68927 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -6072,23 +6072,87 @@ Item* Item_equal::get_first(JOIN_TAB *context, Item *field_item)
}
-longlong Item_func_dyncol_exists::val_int()
+longlong Item_func_dyncol_check::val_int()
{
char buff[STRING_BUFFER_USUAL_SIZE];
String tmp(buff, sizeof(buff), &my_charset_bin);
DYNAMIC_COLUMN col;
String *str;
- ulonglong num;
enum enum_dyncol_func_result rc;
- num= args[1]->val_int();
+ str= args[0]->val_str(&tmp);
+ if (args[0]->null_value)
+ goto null;
+ col.length= str->length();
+ /* We do not change the string, so could do this trick */
+ col.str= (char *)str->ptr();
+ rc= mariadb_dyncol_check(&col);
+ if (rc < 0 && rc != ER_DYNCOL_FORMAT)
+ {
+ dynamic_column_error_message(rc);
+ goto null;
+ }
+ null_value= FALSE;
+ return rc == ER_DYNCOL_OK;
+
+null:
+ null_value= TRUE;
+ return 0;
+}
+
+longlong Item_func_dyncol_exists::val_int()
+{
+ char buff[STRING_BUFFER_USUAL_SIZE], nmstrbuf[11];
+ String tmp(buff, sizeof(buff), &my_charset_bin),
+ nmbuf(nmstrbuf, sizeof(nmstrbuf), system_charset_info);
+ DYNAMIC_COLUMN col;
+ String *str;
+ LEX_STRING buf, *name= NULL;
+ ulonglong num= 0;
+ enum enum_dyncol_func_result rc;
+
+ if (args[1]->result_type() == INT_RESULT)
+ num= args[1]->val_int();
+ else
+ {
+ String *nm= args[1]->val_str(&nmbuf);
+ if (!nm || args[1]->null_value)
+ {
+ null_value= 1;
+ return 1;
+ }
+ if (my_charset_same(nm->charset(), &my_charset_utf8_general_ci))
+ {
+ buf.str= (char *) nm->ptr();
+ buf.length= nm->length();
+ }
+ else
+ {
+ uint strlen;
+ uint dummy_errors;
+ buf.str= (char *)sql_alloc((strlen= nm->length() *
+ my_charset_utf8_general_ci.mbmaxlen + 1));
+ if (buf.str)
+ {
+ buf.length=
+ copy_and_convert(buf.str, strlen, &my_charset_utf8_general_ci,
+ nm->ptr(), nm->length(), nm->charset(),
+ &dummy_errors);
+ }
+ else
+ buf.length= 0;
+ }
+ name= &buf;
+ }
str= args[0]->val_str(&tmp);
if (args[0]->null_value || args[1]->null_value || num > UINT_MAX16)
goto null;
col.length= str->length();
/* We do not change the string, so could do this trick */
col.str= (char *)str->ptr();
- rc= dynamic_column_exists(&col, (uint) num);
+ rc= ((name == NULL) ?
+ mariadb_dyncol_exists(&col, (uint) num) :
+ mariadb_dyncol_exists_named(&col, name));
if (rc < 0)
{
dynamic_column_error_message(rc);
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index eed9028a630..afb7bf005bb 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -1883,6 +1883,14 @@ public:
Item *neg_transformer(THD *thd);
};
+class Item_func_dyncol_check :public Item_bool_func
+{
+public:
+ Item_func_dyncol_check(Item *str) :Item_bool_func(str) {}
+ longlong val_int();
+ const char *func_name() const { return "column_check"; }
+};
+
class Item_func_dyncol_exists :public Item_bool_func
{
public:
diff --git a/sql/item_create.cc b/sql/item_create.cc
index 96837a8f262..fc31b074055 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -526,6 +526,54 @@ protected:
virtual ~Create_func_coercibility() {}
};
+class Create_func_dyncol_check : public Create_func_arg1
+{
+public:
+ virtual Item *create_1_arg(THD *thd, Item *arg1);
+
+ static Create_func_dyncol_check s_singleton;
+
+protected:
+ Create_func_dyncol_check() {}
+ virtual ~Create_func_dyncol_check() {}
+};
+
+class Create_func_dyncol_exists : public Create_func_arg2
+{
+public:
+ virtual Item *create_2_arg(THD *thd, Item *arg1, Item *arg2);
+
+ static Create_func_dyncol_exists s_singleton;
+
+protected:
+ Create_func_dyncol_exists() {}
+ virtual ~Create_func_dyncol_exists() {}
+};
+
+class Create_func_dyncol_list : public Create_func_arg1
+{
+public:
+ virtual Item *create_1_arg(THD *thd, Item *arg1);
+
+ static Create_func_dyncol_list s_singleton;
+
+protected:
+ Create_func_dyncol_list() {}
+ virtual ~Create_func_dyncol_list() {}
+};
+
+class Create_func_dyncol_json : public Create_func_arg1
+{
+public:
+ virtual Item *create_1_arg(THD *thd, Item *arg1);
+
+ static Create_func_dyncol_json s_singleton;
+
+protected:
+ Create_func_dyncol_json() {}
+ virtual ~Create_func_dyncol_json() {}
+};
+
class Create_func_compress : public Create_func_arg1
{
@@ -3108,6 +3156,38 @@ Create_func_coercibility::create_1_arg(THD *thd, Item *arg1)
}
+Create_func_dyncol_check Create_func_dyncol_check::s_singleton;
+
+Item*
+Create_func_dyncol_check::create_1_arg(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_dyncol_check(arg1);
+}
+
+Create_func_dyncol_exists Create_func_dyncol_exists::s_singleton;
+
+Item*
+Create_func_dyncol_exists::create_2_arg(THD *thd, Item *arg1, Item *arg2)
+{
+ return new (thd->mem_root) Item_func_dyncol_exists(arg1, arg2);
+}
+
+Create_func_dyncol_list Create_func_dyncol_list::s_singleton;
+
+Item*
+Create_func_dyncol_list::create_1_arg(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_dyncol_list(arg1);
+}
+
+Create_func_dyncol_json Create_func_dyncol_json::s_singleton;
+
+Item*
+Create_func_dyncol_json::create_1_arg(THD *thd, Item *arg1)
+{
+ return new (thd->mem_root) Item_func_dyncol_json(arg1);
+}
+
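Each of the four new COLUMN_* builders above follows the same pattern: a stateless factory class with a static s_singleton instance, later registered by name in func_array. A minimal sketch of that pattern, with hypothetical Expr/Builder names standing in for the Item and Create_func hierarchies:

    struct Expr { virtual ~Expr() {} };
    struct ColumnCheck : Expr
    {
      Expr *arg;
      explicit ColumnCheck(Expr *a) : arg(a) {}
    };

    struct Builder
    {
      virtual Expr *create(Expr *arg) const= 0;
      virtual ~Builder() {}
    };

    struct ColumnCheckBuilder : Builder
    {
      Expr *create(Expr *arg) const { return new ColumnCheck(arg); }
      static ColumnCheckBuilder s_singleton;   // one shared instance
    };
    ColumnCheckBuilder ColumnCheckBuilder::s_singleton;

Because the builders hold no state, a single shared instance per function name can safely sit in the lookup table for the lifetime of the server.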
Create_func_concat Create_func_concat::s_singleton;
Item*
@@ -4393,27 +4473,34 @@ Create_func_master_pos_wait::create_native(THD *thd, LEX_STRING name,
if (item_list != NULL)
arg_count= item_list->elements;
+ if (arg_count < 2 || arg_count > 4)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ return func;
+ }
+
+ thd->lex->safe_to_cache_query= 0;
+
+ Item *param_1= item_list->pop();
+ Item *param_2= item_list->pop();
switch (arg_count) {
case 2:
{
- Item *param_1= item_list->pop();
- Item *param_2= item_list->pop();
func= new (thd->mem_root) Item_master_pos_wait(param_1, param_2);
- thd->lex->safe_to_cache_query= 0;
break;
}
case 3:
{
- Item *param_1= item_list->pop();
- Item *param_2= item_list->pop();
Item *param_3= item_list->pop();
func= new (thd->mem_root) Item_master_pos_wait(param_1, param_2, param_3);
- thd->lex->safe_to_cache_query= 0;
break;
}
- default:
+ case 4:
{
- my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ Item *param_3= item_list->pop();
+ Item *param_4= item_list->pop();
+ func= new (thd->mem_root) Item_master_pos_wait(param_1, param_2, param_3,
+ param_4);
break;
}
}
@@ -5245,6 +5332,10 @@ static Native_func_registry func_array[] =
{ { C_STRING_WITH_LEN("CHARACTER_LENGTH") }, BUILDER(Create_func_char_length)},
{ { C_STRING_WITH_LEN("CHAR_LENGTH") }, BUILDER(Create_func_char_length)},
{ { C_STRING_WITH_LEN("COERCIBILITY") }, BUILDER(Create_func_coercibility)},
+ { { C_STRING_WITH_LEN("COLUMN_CHECK") }, BUILDER(Create_func_dyncol_check)},
+ { { C_STRING_WITH_LEN("COLUMN_EXISTS") }, BUILDER(Create_func_dyncol_exists)},
+ { { C_STRING_WITH_LEN("COLUMN_LIST") }, BUILDER(Create_func_dyncol_list)},
+ { { C_STRING_WITH_LEN("COLUMN_JSON") }, BUILDER(Create_func_dyncol_json)},
{ { C_STRING_WITH_LEN("COMPRESS") }, BUILDER(Create_func_compress)},
{ { C_STRING_WITH_LEN("CONCAT") }, BUILDER(Create_func_concat)},
{ { C_STRING_WITH_LEN("CONCAT_WS") }, BUILDER(Create_func_concat_ws)},
@@ -5704,7 +5795,7 @@ static List<Item> *create_func_dyncol_prepare(THD *thd,
for (uint i= 0; (def= li++) ;)
{
dfs[0][i++]= *def;
- args->push_back(def->num);
+ args->push_back(def->key);
args->push_back(def->value);
}
return args;
@@ -5720,7 +5811,6 @@ Item *create_func_dyncol_create(THD *thd, List<DYNCALL_CREATE_DEF> &list)
return new (thd->mem_root) Item_func_dyncol_create(*args, dfs);
}
-
Item *create_func_dyncol_add(THD *thd, Item *str,
List<DYNCALL_CREATE_DEF> &list)
{
@@ -5740,7 +5830,7 @@ Item *create_func_dyncol_add(THD *thd, Item *str,
Item *create_func_dyncol_delete(THD *thd, Item *str, List<Item> &nums)
{
DYNCALL_CREATE_DEF *dfs;
- Item *num;
+ Item *key;
List_iterator_fast<Item> it(nums);
List<Item> *args= new (thd->mem_root) List<Item>;
@@ -5750,12 +5840,12 @@ Item *create_func_dyncol_delete(THD *thd, Item *str, List<Item> &nums)
if (!args || !dfs)
return NULL;
- for (uint i= 0; (num= it++); i++)
+ for (uint i= 0; (key= it++); i++)
{
- dfs[i].num= num;
+ dfs[i].key= key;
dfs[i].value= new Item_null();
dfs[i].type= DYN_COL_INT;
- args->push_back(dfs[i].num);
+ args->push_back(dfs[i].key);
args->push_back(dfs[i].value);
}
diff --git a/sql/item_create.h b/sql/item_create.h
index ac6b0f8454f..5ecb45e9eae 100644
--- a/sql/item_create.h
+++ b/sql/item_create.h
@@ -180,5 +180,6 @@ Item *create_func_dyncol_get(THD *thd, Item *num, Item *str,
Cast_target cast_type,
const char *c_len, const char *c_dec,
CHARSET_INFO *cs);
+Item *create_func_dyncol_json(THD *thd, Item *str);
#endif
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 7f309c7a4d3..aca76eac82a 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -3869,13 +3869,45 @@ longlong Item_master_pos_wait::val_int()
#ifdef HAVE_REPLICATION
longlong pos = (ulong)args[1]->val_int();
longlong timeout = (arg_count==3) ? args[2]->val_int() : 0 ;
- if ((event_count = active_mi->rli.wait_for_pos(thd, log_name, pos, timeout)) == -2)
+ String connection_name_buff;
+ LEX_STRING connection_name;
+ Master_info *mi;
+ if (arg_count == 4)
+ {
+ String *con;
+ if (!(con= args[3]->val_str(&connection_name_buff)))
+ goto err;
+
+ connection_name.str= (char*) con->ptr();
+ connection_name.length= con->length();
+ if (check_master_connection_name(&connection_name))
+ {
+ my_error(ER_WRONG_ARGUMENTS, MYF(ME_JUST_WARNING),
+ "MASTER_CONNECTION_NAME");
+ goto err;
+ }
+ }
+ else
+ connection_name= thd->variables.default_master_connection;
+
+ if (!(mi= master_info_index->get_master_info(&connection_name,
+ MYSQL_ERROR::WARN_LEVEL_WARN)))
+ goto err;
+ if ((event_count = mi->rli.wait_for_pos(thd, log_name, pos, timeout)) == -2)
{
null_value = 1;
event_count=0;
}
#endif
return event_count;
+
+#ifdef HAVE_REPLICATION
+err:
+ {
+ null_value = 1;
+ return 0;
+ }
+#endif
}
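MASTER_POS_WAIT above gains an optional fourth argument naming a master connection; when it is absent the session's default connection is used, and an unknown name yields SQL NULL. A standalone sketch of that resolution order (the registry and types are illustrative, not the server's Master_info machinery):

    #include <cstddef>
    #include <map>
    #include <string>

    struct MasterInfo { /* per-connection replication state */ };

    MasterInfo *find_master(std::map<std::string, MasterInfo> &registry,
                            const std::string *explicit_name,
                            const std::string &session_default)
    {
      // Explicit 4th argument wins; otherwise fall back to the session default.
      const std::string &name= explicit_name ? *explicit_name : session_default;
      std::map<std::string, MasterInfo>::iterator it= registry.find(name);
      return it == registry.end() ? NULL : &it->second;   // NULL maps to SQL NULL
    }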
@@ -4303,7 +4335,7 @@ longlong Item_func_sleep::val_int()
#define extra_size sizeof(double)
-static user_var_entry *get_variable(HASH *hash, LEX_STRING &name,
+user_var_entry *get_variable(HASH *hash, LEX_STRING &name,
bool create_if_not_exists)
{
user_var_entry *entry;
@@ -4315,7 +4347,9 @@ static user_var_entry *get_variable(HASH *hash, LEX_STRING &name,
uint size=ALIGN_SIZE(sizeof(user_var_entry))+name.length+1+extra_size;
if (!my_hash_inited(hash))
return 0;
- if (!(entry = (user_var_entry*) my_malloc(size,MYF(MY_WME | ME_FATALERROR))))
+ if (!(entry = (user_var_entry*) my_malloc(size,
+ MYF(MY_WME | ME_FATALERROR |
+ MY_THREAD_SPECIFIC))))
return 0;
entry->name.str=(char*) entry+ ALIGN_SIZE(sizeof(user_var_entry))+
extra_size;
@@ -4543,7 +4577,8 @@ update_hash(user_var_entry *entry, bool set_null, void *ptr, uint length,
entry->value=0;
entry->value= (char*) my_realloc(entry->value, length,
MYF(MY_ALLOW_ZERO_PTR | MY_WME |
- ME_FATALERROR));
+ ME_FATALERROR |
+ MY_THREAD_SPECIFIC));
if (!entry->value)
return 1;
}
diff --git a/sql/item_func.h b/sql/item_func.h
index 7b6b60b2914..f562c87fe1c 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -60,7 +60,7 @@ public:
NOW_FUNC, TRIG_COND_FUNC,
SUSERVAR_FUNC, GUSERVAR_FUNC, COLLATE_FUNC,
EXTRACT_FUNC, CHAR_TYPECAST_FUNC, FUNC_SP, UDF_FUNC,
- NEG_FUNC, GSYSVAR_FUNC };
+ NEG_FUNC, GSYSVAR_FUNC, DYNCOL_FUNC };
enum optimize_type { OPTIMIZE_NONE,OPTIMIZE_KEY,OPTIMIZE_OP, OPTIMIZE_NULL,
OPTIMIZE_EQUAL };
enum Type type() const { return FUNC_ITEM; }
@@ -515,6 +515,8 @@ public:
{ collation.set_numeric(); fix_char_length(21); }
Item_int_func(Item *a,Item *b,Item *c) :Item_func(a,b,c)
{ collation.set_numeric(); fix_char_length(21); }
+ Item_int_func(Item *a,Item *b,Item *c, Item *d) :Item_func(a,b,c,d)
+ { collation.set_numeric(); fix_char_length(21); }
Item_int_func(List<Item> &list) :Item_func(list)
{ collation.set_numeric(); fix_char_length(21); }
Item_int_func(THD *thd, Item_int_func *item) :Item_func(thd, item)
@@ -1542,6 +1544,7 @@ class Item_master_pos_wait :public Item_int_func
public:
Item_master_pos_wait(Item *a,Item *b) :Item_int_func(a,b) {}
Item_master_pos_wait(Item *a,Item *b,Item *c) :Item_int_func(a,b,c) {}
+ Item_master_pos_wait(Item *a,Item *b, Item *c, Item *d) :Item_int_func(a,b,c,d) {}
longlong val_int();
const char *func_name() const { return "master_pos_wait"; }
void fix_length_and_dec() { max_length=21; set_persist_maybe_null(1);}
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index c0a6120b361..bcd3b881934 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -58,6 +58,7 @@
C_MODE_START
#include "../mysys/my_static.h" // For soundex_map
C_MODE_END
+#include "sql_show.h" // append_identifier
/**
@todo Remove this. It is not safe to use a shared String object.
@@ -3786,7 +3787,8 @@ String *Item_func_uuid::val_str(String *str)
Item_func_dyncol_create::Item_func_dyncol_create(List<Item> &args,
DYNCALL_CREATE_DEF *dfs)
- : Item_str_func(args), defs(dfs), vals(0), nums(0)
+ : Item_str_func(args), defs(dfs), vals(0), keys_num(NULL), keys_str(NULL),
+ names(FALSE), force_names(FALSE)
{
DBUG_ASSERT((args.elements & 0x1) == 0); // even number of arguments
}
@@ -3794,14 +3796,28 @@ Item_func_dyncol_create::Item_func_dyncol_create(List<Item> &args,
bool Item_func_dyncol_create::fix_fields(THD *thd, Item **ref)
{
+ uint i;
bool res= Item_func::fix_fields(thd, ref); // no need Item_str_func here
- vals= (DYNAMIC_COLUMN_VALUE *) alloc_root(thd->mem_root,
- sizeof(DYNAMIC_COLUMN_VALUE) *
- (arg_count / 2));
- nums= (uint *) alloc_root(thd->mem_root,
- sizeof(uint) * (arg_count / 2));
- status_var_increment(thd->status_var.feature_dynamic_columns);
- return res || vals == 0 || nums == 0;
+ if (!res)
+ {
+ vals= (DYNAMIC_COLUMN_VALUE *) alloc_root(thd->mem_root,
+ sizeof(DYNAMIC_COLUMN_VALUE) *
+ (arg_count / 2));
+ for (i= 0; i + 1 < arg_count && args[i]->result_type() == INT_RESULT; i+= 2);
+ if (i + 1 < arg_count)
+ {
+ names= TRUE;
+ }
+
+ keys_num= (uint *) alloc_root(thd->mem_root,
+ (sizeof(LEX_STRING) > sizeof(uint) ?
+ sizeof(LEX_STRING) :
+ sizeof(uint)) *
+ (arg_count / 2));
+ keys_str= (LEX_STRING *) keys_num;
+ status_var_increment(thd->status_var.feature_dynamic_columns);
+ }
+ return res || vals == 0 || keys_num == 0;
}
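fix_fields() above allocates one key array sized for the larger of uint and LEX_STRING, so the same memory can later be used as keys_num or keys_str depending on whether any key turns out to be non-numeric. A sketch of that sizing trick (NameKey stands in for LEX_STRING; the server allocates from its mem_root rather than malloc):

    #include <cstddef>
    #include <cstdlib>

    struct NameKey { char *str; size_t length; };

    void *alloc_key_array(size_t count)
    {
      // One element must be able to hold either representation.
      size_t elem= sizeof(NameKey) > sizeof(unsigned) ? sizeof(NameKey)
                                                      : sizeof(unsigned);
      return malloc(elem * count);   // caller casts to unsigned* or NameKey*
    }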
@@ -3812,13 +3828,49 @@ void Item_func_dyncol_create::fix_length_and_dec()
decimals= 0;
}
-void Item_func_dyncol_create::prepare_arguments()
+bool Item_func_dyncol_create::prepare_arguments(bool force_names_arg)
{
char buff[STRING_BUFFER_USUAL_SIZE];
String *res, tmp(buff, sizeof(buff), &my_charset_bin);
uint column_count= (arg_count / 2);
uint i;
my_decimal dtmp, *dres;
+ force_names= force_names_arg;
+
+ if (!(names || force_names))
+ {
+ for (i= 0; i < column_count; i++)
+ {
+ uint valpos= i * 2 + 1;
+ DYNAMIC_COLUMN_TYPE type= defs[i].type;
+ if (type == DYN_COL_NULL)
+ switch (args[valpos]->field_type())
+ {
+ case MYSQL_TYPE_VARCHAR:
+ case MYSQL_TYPE_ENUM:
+ case MYSQL_TYPE_SET:
+ case MYSQL_TYPE_TINY_BLOB:
+ case MYSQL_TYPE_MEDIUM_BLOB:
+ case MYSQL_TYPE_LONG_BLOB:
+ case MYSQL_TYPE_BLOB:
+ case MYSQL_TYPE_VAR_STRING:
+ case MYSQL_TYPE_STRING:
+ case MYSQL_TYPE_GEOMETRY:
+ type= DYN_COL_STRING;
+ break;
+ default:
+ break;
+ }
+
+ if (type == DYN_COL_STRING &&
+ args[valpos]->type() == Item::FUNC_ITEM &&
+ ((Item_func *)args[valpos])->functype() == DYNCOL_FUNC)
+ {
+ force_names= 1;
+ break;
+ }
+ }
+ }
/* get values */
for (i= 0; i < column_count; i++)
@@ -3877,7 +3929,59 @@ void Item_func_dyncol_create::prepare_arguments()
break;
}
}
- nums[i]= (uint) args[i * 2]->val_int();
+ if (type == DYN_COL_STRING &&
+ args[valpos]->type() == Item::FUNC_ITEM &&
+ ((Item_func *)args[valpos])->functype() == DYNCOL_FUNC)
+ {
+ DBUG_ASSERT(names || force_names);
+ type= DYN_COL_DYNCOL;
+ }
+ if (names || force_names)
+ {
+ res= args[i * 2]->val_str(&tmp);
+ if (res)
+ {
+ // guarantee UTF-8 strings for names
+ if (my_charset_same(res->charset(), &my_charset_utf8_general_ci))
+ {
+ keys_str[i].length= res->length();
+ keys_str[i].str= sql_strmake(res->ptr(), res->length());
+ }
+ else
+ {
+ uint strlen;
+ uint dummy_errors;
+ char *str=
+ (char *)sql_alloc((strlen= res->length() *
+ my_charset_utf8_general_ci.mbmaxlen + 1));
+ if (str)
+ {
+ keys_str[i].length=
+ copy_and_convert(str, strlen, &my_charset_utf8_general_ci,
+ res->ptr(), res->length(), res->charset(),
+ &dummy_errors);
+ keys_str[i].str= str;
+ }
+ else
+ keys_str[i].length= 0;
+
+ }
+ }
+ else
+ {
+ keys_str[i].length= 0;
+ keys_str[i].str= NULL;
+ }
+ }
+ else
+ keys_num[i]= (uint) args[i * 2]->val_int();
+ if (args[i * 2]->null_value)
+ {
+ /* to make cleanup possible */
+ for (; i < column_count; i++)
+ vals[i].type= DYN_COL_NULL;
+ return 1;
+ }
vals[i].type= type;
switch (type) {
case DYN_COL_NULL:
@@ -3892,11 +3996,11 @@ void Item_func_dyncol_create::prepare_arguments()
case DYN_COL_DOUBLE:
vals[i].x.double_value= args[valpos]->val_real();
break;
+ case DYN_COL_DYNCOL:
case DYN_COL_STRING:
res= args[valpos]->val_str(&tmp);
if (res &&
- (vals[i].x.string.value.str= my_strndup(res->ptr(), res->length(),
- MYF(MY_WME))))
+ (vals[i].x.string.value.str= sql_strmake(res->ptr(), res->length())))
{
vals[i].x.string.value.length= res->length();
vals[i].x.string.charset= res->charset();
@@ -3911,7 +4015,7 @@ void Item_func_dyncol_create::prepare_arguments()
case DYN_COL_DECIMAL:
if ((dres= args[valpos]->val_decimal(&dtmp)))
{
- dynamic_column_prepare_decimal(&vals[i]);
+ mariadb_dyncol_prepare_decimal(&vals[i]);
DBUG_ASSERT(vals[i].x.decimal.value.len == dres->len);
vals[i].x.decimal.value.intg= dres->intg;
vals[i].x.decimal.value.frac= dres->frac;
@@ -3921,7 +4025,7 @@ void Item_func_dyncol_create::prepare_arguments()
}
else
{
- dynamic_column_prepare_decimal(&vals[i]); // just to be safe
+ mariadb_dyncol_prepare_decimal(&vals[i]); // just to be safe
DBUG_ASSERT(args[valpos]->null_value);
}
break;
@@ -3940,24 +4044,12 @@ void Item_func_dyncol_create::prepare_arguments()
}
if (vals[i].type != DYN_COL_NULL && args[valpos]->null_value)
{
- if (vals[i].type == DYN_COL_STRING)
- my_free(vals[i].x.string.value.str);
vals[i].type= DYN_COL_NULL;
}
}
+ return FALSE;
}
-void Item_func_dyncol_create::cleanup_arguments()
-{
- uint column_count= (arg_count / 2);
- uint i;
-
- for (i= 0; i < column_count; i++)
- {
- if (vals[i].type == DYN_COL_STRING)
- my_free(vals[i].x.string.value.str);
- }
-}
String *Item_func_dyncol_create::val_str(String *str)
{
@@ -3967,30 +4059,37 @@ String *Item_func_dyncol_create::val_str(String *str)
enum enum_dyncol_func_result rc;
DBUG_ASSERT((arg_count & 0x1) == 0); // even number of arguments
- prepare_arguments();
-
- if ((rc= dynamic_column_create_many(&col, column_count, nums, vals)))
+ if (prepare_arguments(FALSE))
{
- dynamic_column_error_message(rc);
- dynamic_column_column_free(&col);
res= NULL;
- null_value= TRUE;
+ null_value= 1;
}
else
{
- /* Move result from DYNAMIC_COLUMN to str_value */
- char *ptr;
- size_t length, alloc_length;
- dynamic_column_reassociate(&col, &ptr, &length, &alloc_length);
- str_value.reassociate(ptr, (uint32) length, (uint32) alloc_length,
- &my_charset_bin);
- res= &str_value;
- null_value= FALSE;
+ if ((rc= ((names || force_names) ?
+ mariadb_dyncol_create_many_named(&col, column_count, keys_str,
+ vals, TRUE) :
+ mariadb_dyncol_create_many(&col, column_count, keys_num,
+ vals, TRUE))))
+ {
+ dynamic_column_error_message(rc);
+ dynamic_column_column_free(&col);
+ res= NULL;
+ null_value= TRUE;
+ }
+ else
+ {
+ /* Move result from DYNAMIC_COLUMN to str_value */
+ char *ptr;
+ size_t length, alloc_length;
+ dynstr_reassociate(&col, &ptr, &length, &alloc_length);
+ str_value.reassociate(ptr, (uint32) length, (uint32) alloc_length,
+ &my_charset_bin);
+ res= &str_value;
+ null_value= FALSE;
+ }
}
- /* cleanup */
- cleanup_arguments();
-
return res;
}
@@ -4016,6 +4115,7 @@ void Item_func_dyncol_create::print_arguments(String *str,
case DYN_COL_DOUBLE:
str->append(STRING_WITH_LEN(" AS double"));
break;
+ case DYN_COL_DYNCOL:
case DYN_COL_STRING:
str->append(STRING_WITH_LEN(" AS char"));
if (defs[i].cs)
@@ -4053,6 +4153,40 @@ void Item_func_dyncol_create::print(String *str,
str->append(')');
}
+String *Item_func_dyncol_json::val_str(String *str)
+{
+ DYNAMIC_STRING json, col;
+ String *res;
+ enum enum_dyncol_func_result rc;
+
+ res= args[0]->val_str(str);
+ if (args[0]->null_value)
+ goto null;
+
+ col.str= (char *)res->ptr();
+ col.length= res->length();
+ if ((rc= mariadb_dyncol_json(&col, &json)))
+ {
+ dynamic_column_error_message(rc);
+ goto null;
+ }
+ bzero(&col, sizeof(col));
+ {
+ /* Move result from DYNAMIC_COLUMN to str */
+ char *ptr;
+ size_t length, alloc_length;
+ dynstr_reassociate(&json, &ptr, &length, &alloc_length);
+ str->reassociate(ptr, (uint32) length, (uint32) alloc_length,
+ &my_charset_utf8_general_ci);
+ null_value= FALSE;
+ }
+ return str;
+
+null:
+ bzero(&col, sizeof(col));
+ null_value= TRUE;
+ return NULL;
+}
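COLUMN_JSON above hands its result over with dynstr_reassociate(): the dynamic string gives up ownership of its heap buffer and the String adopts it, so no copy is made. A minimal sketch of the donor side of that idiom, with illustrative types:

    #include <cstddef>

    struct DynStr { char *str; size_t length; size_t max_length; };

    void reassociate(DynStr *d, char **ptr, size_t *len, size_t *alloced)
    {
      *ptr= d->str;
      *len= d->length;
      *alloced= d->max_length;
      d->str= 0;                      // donor must forget the buffer so it
      d->length= d->max_length= 0;    // is not freed twice
    }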
String *Item_func_dyncol_add::val_str(String *str)
{
@@ -4064,21 +4198,25 @@ String *Item_func_dyncol_add::val_str(String *str)
/* We store the packed data last */
res= args[arg_count - 1]->val_str(str);
- if (args[arg_count - 1]->null_value)
+ if (args[arg_count - 1]->null_value ||
+ init_dynamic_string(&col, NULL, res->length() + STRING_BUFFER_USUAL_SIZE,
+ STRING_BUFFER_USUAL_SIZE))
goto null;
- init_dynamic_string(&col, NULL, res->length() + STRING_BUFFER_USUAL_SIZE,
- STRING_BUFFER_USUAL_SIZE);
col.length= res->length();
memcpy(col.str, res->ptr(), col.length);
- prepare_arguments();
+ if (prepare_arguments(mariadb_dyncol_has_names(&col)))
+ goto null;
- if ((rc= dynamic_column_update_many(&col, column_count, nums, vals)))
+ if ((rc= ((names || force_names) ?
+ mariadb_dyncol_update_many_named(&col, column_count,
+ keys_str, vals) :
+ mariadb_dyncol_update_many(&col, column_count,
+ keys_num, vals))))
{
dynamic_column_error_message(rc);
dynamic_column_column_free(&col);
- cleanup_arguments();
goto null;
}
@@ -4086,16 +4224,12 @@ String *Item_func_dyncol_add::val_str(String *str)
/* Move result from DYNAMIC_COLUMN to str */
char *ptr;
size_t length, alloc_length;
- dynamic_column_reassociate(&col, &ptr, &length, &alloc_length);
+ dynstr_reassociate(&col, &ptr, &length, &alloc_length);
str->reassociate(ptr, (uint32) length, (uint32) alloc_length,
&my_charset_bin);
null_value= FALSE;
}
- /* cleanup */
- dynamic_column_column_free(&col);
- cleanup_arguments();
-
return str;
null:
@@ -4127,10 +4261,48 @@ bool Item_dyncol_get::get_dyn_value(DYNAMIC_COLUMN_VALUE *val, String *tmp)
{
DYNAMIC_COLUMN dyn_str;
String *res;
- longlong num;
+ longlong num= 0;
+ LEX_STRING buf, *name= NULL;
+ char nmstrbuf[11];
+ String nmbuf(nmstrbuf, sizeof(nmstrbuf), system_charset_info);
enum enum_dyncol_func_result rc;
- num= args[1]->val_int();
+ if (args[1]->result_type() == INT_RESULT)
+ num= args[1]->val_int();
+ else
+ {
+ String *nm= args[1]->val_str(&nmbuf);
+ if (!nm || args[1]->null_value)
+ {
+ null_value= 1;
+ return 1;
+ }
+
+ if (my_charset_same(nm->charset(), &my_charset_utf8_general_ci))
+ {
+ buf.str= (char *) nm->ptr();
+ buf.length= nm->length();
+ }
+ else
+ {
+ uint strlen;
+ uint dummy_errors;
+ buf.str= (char *)sql_alloc((strlen= nm->length() *
+ my_charset_utf8_general_ci.mbmaxlen + 1));
+ if (buf.str)
+ {
+ buf.length=
+ copy_and_convert(buf.str, strlen, &my_charset_utf8_general_ci,
+ nm->ptr(), nm->length(), nm->charset(),
+ &dummy_errors);
+ }
+ else
+ buf.length= 0;
+ }
+ name= &buf;
+ }
+
+
if (args[1]->null_value || num < 0 || num > INT_MAX)
{
null_value= 1;
@@ -4146,7 +4318,9 @@ bool Item_dyncol_get::get_dyn_value(DYNAMIC_COLUMN_VALUE *val, String *tmp)
dyn_str.str= (char*) res->ptr();
dyn_str.length= res->length();
- if ((rc= dynamic_column_get(&dyn_str, (uint) num, val)))
+ if ((rc= ((name == NULL) ?
+ mariadb_dyncol_get(&dyn_str, (uint) num, val) :
+ mariadb_dyncol_get_named(&dyn_str, name, val))))
{
dynamic_column_error_message(rc);
null_value= 1;
@@ -4178,6 +4352,7 @@ String *Item_dyncol_get::val_str(String *str_result)
case DYN_COL_DOUBLE:
str_result->set_real(val.x.double_value, NOT_FIXED_DEC, &my_charset_latin1);
break;
+ case DYN_COL_DYNCOL:
case DYN_COL_STRING:
if ((char*) tmp.ptr() <= val.x.string.value.str &&
(char*) tmp.ptr() + tmp.length() >= val.x.string.value.str)
@@ -4253,6 +4428,7 @@ longlong Item_dyncol_get::val_int()
return 0;
switch (val.type) {
+ case DYN_COL_DYNCOL:
case DYN_COL_NULL:
goto null;
case DYN_COL_UINT:
@@ -4333,6 +4509,7 @@ double Item_dyncol_get::val_real()
return 0.0;
switch (val.type) {
+ case DYN_COL_DYNCOL:
case DYN_COL_NULL:
goto null;
case DYN_COL_UINT:
@@ -4390,6 +4567,7 @@ my_decimal *Item_dyncol_get::val_decimal(my_decimal *decimal_value)
return NULL;
switch (val.type) {
+ case DYN_COL_DYNCOL:
case DYN_COL_NULL:
goto null;
case DYN_COL_UINT:
@@ -4449,6 +4627,7 @@ bool Item_dyncol_get::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
return 1; // Error
switch (val.type) {
+ case DYN_COL_DYNCOL:
case DYN_COL_NULL:
goto null;
case DYN_COL_INT:
@@ -4494,7 +4673,6 @@ null:
return 1;
}
-
void Item_dyncol_get::print(String *str, enum_query_type query_type)
{
str->append(STRING_WITH_LEN("column_get("));
@@ -4509,7 +4687,8 @@ String *Item_func_dyncol_list::val_str(String *str)
{
uint i;
enum enum_dyncol_func_result rc;
- DYNAMIC_ARRAY arr;
+ LEX_STRING *names= 0;
+ uint count;
DYNAMIC_COLUMN col;
String *res= args[0]->val_str(str);
@@ -4518,33 +4697,37 @@ String *Item_func_dyncol_list::val_str(String *str)
col.length= res->length();
/* We do not change the string, so we can safely use this trick */
col.str= (char *)res->ptr();
- if ((rc= dynamic_column_list(&col, &arr)))
+ if ((rc= mariadb_dyncol_list_named(&col, &count, &names)))
{
+ bzero(&col, sizeof(col));
dynamic_column_error_message(rc);
- delete_dynamic(&arr);
goto null;
}
+ bzero(&col, sizeof(col));
/*
- We support elements from 0 - 65536, so max size for one element is
- 6 (including ,).
+ We estimate the average name length as 10; 13 bytes per name also
+ covers the quoting and the separating comma.
*/
- if (str->alloc(arr.elements * 6))
+ if (str->alloc(count * 13))
goto null;
str->length(0);
- for (i= 0; i < arr.elements; i++)
+ str->set_charset(&my_charset_utf8_general_ci);
+ for (i= 0; i < count; i++)
{
- str->qs_append(*dynamic_element(&arr, i, uint*));
- if (i < arr.elements - 1)
+ append_identifier(current_thd, str, names[i].str, names[i].length);
+ if (i < count - 1)
str->qs_append(',');
}
-
null_value= FALSE;
- delete_dynamic(&arr);
+ if (names)
+ my_free(names);
return str;
null:
null_value= TRUE;
+ if (names)
+ my_free(names);
return NULL;
}
+
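COLUMN_LIST above now returns column names instead of numbers, quoting each one via append_identifier() before joining with commas. A standalone stand-in showing the quoting convention (backticks, with embedded backticks doubled); this is a sketch, not the server's append_identifier():

    #include <string>
    #include <vector>

    std::string join_identifiers(const std::vector<std::string> &names)
    {
      std::string out;
      for (size_t i= 0; i < names.size(); i++)
      {
        out += '`';
        for (size_t j= 0; j < names[i].size(); j++)
        {
          if (names[i][j] == '`')
            out += '`';               // escape a backtick by doubling it
          out += names[i][j];
        }
        out += '`';
        if (i + 1 < names.size())
          out += ',';
      }
      return out;
    }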
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index 786f66e3aab..a42240f1b35 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -1003,9 +1003,10 @@ class Item_func_dyncol_create: public Item_str_func
protected:
DYNCALL_CREATE_DEF *defs;
DYNAMIC_COLUMN_VALUE *vals;
- uint *nums;
- void prepare_arguments();
- void cleanup_arguments();
+ uint *keys_num;
+ LEX_STRING *keys_str;
+ bool names, force_names;
+ bool prepare_arguments(bool force_names);
void print_arguments(String *str, enum_query_type query_type);
public:
Item_func_dyncol_create(List<Item> &args, DYNCALL_CREATE_DEF *dfs);
@@ -1014,6 +1015,7 @@ public:
const char *func_name() const{ return "column_create"; }
String *val_str(String *);
virtual void print(String *str, enum_query_type query_type);
+ virtual enum Functype functype() const { return DYNCOL_FUNC; }
};
@@ -1028,6 +1030,19 @@ public:
virtual void print(String *str, enum_query_type query_type);
};
+class Item_func_dyncol_json: public Item_str_func
+{
+public:
+ Item_func_dyncol_json(Item *str) :Item_str_func(str) {}
+ const char *func_name() const{ return "column_json"; }
+ String *val_str(String *);
+ void fix_length_and_dec()
+ {
+ maybe_null= TRUE;
+ collation.set(&my_charset_bin);
+ decimals= 0;
+ }
+};
/*
The following functions is always called from an Item_cast function
@@ -1038,11 +1053,9 @@ class Item_dyncol_get: public Item_str_func
public:
Item_dyncol_get(Item *str, Item *num)
:Item_str_func(str, num)
- {
- max_length= MAX_DYNAMIC_COLUMN_LENGTH;
- }
+ {}
void fix_length_and_dec()
- { set_persist_maybe_null(1); }
+ { set_persist_maybe_null(1); max_length= MAX_BLOB_WIDTH; }
/* Mark that collation can change between calls */
bool dynamic_result() { return 1; }
@@ -1070,3 +1083,4 @@ public:
extern String my_empty_string;
#endif /* ITEM_STRFUNC_INCLUDED */
+
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index dbc4f9818d5..8b3ce257642 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -1922,7 +1922,7 @@ bool Item_allany_subselect::is_maxmin_applicable(JOIN *join)
WHERE condition.
*/
return (abort_on_null || (upper_item && upper_item->is_top_level_item())) &&
- !join->select_lex->master_unit()->uncacheable && !func->eqne_op();
+ !(join->select_lex->master_unit()->uncacheable & ~UNCACHEABLE_EXPLAIN) && !func->eqne_op();
}
@@ -5066,7 +5066,7 @@ bool Ordered_key::alloc_keys_buffers()
DBUG_ASSERT(key_buff_elements > 0);
if (!(key_buff= (rownum_t*) my_malloc((size_t)(key_buff_elements *
- sizeof(rownum_t)), MYF(MY_WME))))
+ sizeof(rownum_t)), MYF(MY_WME | MY_THREAD_SPECIFIC))))
return TRUE;
/*
@@ -5097,7 +5097,8 @@ int
Ordered_key::cmp_keys_by_row_data(ha_rows a, ha_rows b)
{
uchar *rowid_a, *rowid_b;
- int error, cmp_res;
+ int __attribute__((unused)) error;
+ int cmp_res;
/* The length in bytes of the rowids (positions) of tmp_table. */
uint rowid_length= tbl->file->ref_length;
@@ -5193,7 +5194,8 @@ int Ordered_key::cmp_key_with_search_key(rownum_t row_num)
/* The length in bytes of the rowids (positions) of tmp_table. */
uint rowid_length= tbl->file->ref_length;
uchar *cur_rowid= row_num_to_rowid + row_num * rowid_length;
- int error, cmp_res;
+ int __attribute__((unused)) error;
+ int cmp_res;
if ((error= tbl->file->ha_rnd_pos(tbl->record[0], cur_rowid)))
{
@@ -5491,7 +5493,7 @@ subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts,
!(null_bitmaps= (MY_BITMAP**) thd->alloc(merge_keys_count *
sizeof(MY_BITMAP*))) ||
!(row_num_to_rowid= (uchar*) my_malloc((size_t)(row_count * rowid_length),
- MYF(MY_WME))))
+ MYF(MY_WME | MY_THREAD_SPECIFIC))))
return TRUE;
/* Create the only non-NULL key if there is any. */
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 04358ac8c45..8816e1352a9 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -636,13 +636,24 @@ void Item_sum::cleanup()
@retval > 0 if key1 > key2
*/
-static int simple_str_key_cmp(void* arg, uchar* key1, uchar* key2)
+int simple_str_key_cmp(void* arg, uchar* key1, uchar* key2)
{
Field *f= (Field*) arg;
return f->cmp(key1, key2);
}
+C_MODE_START
+
+int count_distinct_walk(void *elem, element_count count, void *arg)
+{
+ (*((ulonglong*)arg))++;
+ return 0;
+}
+
+C_MODE_END
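The C_MODE_START/C_MODE_END wrapper above exists because the tree-walk machinery invokes these callbacks through plain C function pointers, so they must have C linkage when compiled as C++. The equivalent in standard C++ is an extern "C" block (walk_fn and count_walk are illustrative names):

    extern "C" {
      typedef int (*walk_fn)(void *elem, unsigned long count, void *arg);

      int count_walk(void *, unsigned long, void *arg)
      {
        ++*static_cast<unsigned long long *>(arg);   // tally one distinct element
        return 0;                                    // 0 means keep walking
      }
    }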
+
+
/**
Correctly compare composite keys.
@@ -710,13 +721,13 @@ C_MODE_START
/* Declarations for auxiliary C callbacks */
-static int simple_raw_key_cmp(void* arg, const void* key1, const void* key2)
+int simple_raw_key_cmp(void* arg, const void* key1, const void* key2)
{
return memcmp(key1, key2, *(uint *) arg);
}
-static int item_sum_distinct_walk(void *element, element_count num_of_dups,
+int item_sum_distinct_walk(void *element, element_count num_of_dups,
void *item)
{
return ((Aggregator_distinct*) (item))->unique_walk_function(element);
@@ -3480,7 +3491,8 @@ bool Item_func_group_concat::setup(THD *thd)
init_tree(tree, (uint) min(thd->variables.max_heap_table_size,
thd->variables.sortbuff_size/16), 0,
tree_key_length,
- group_concat_key_cmp_with_order , 0, NULL, (void*) this);
+ group_concat_key_cmp_with_order, NULL, (void*) this,
+ MYF(MY_THREAD_SPECIFIC));
}
if (distinct)
diff --git a/sql/keycaches.cc b/sql/keycaches.cc
index 26a39808c56..84ed67d00f0 100644
--- a/sql/keycaches.cc
+++ b/sql/keycaches.cc
@@ -159,7 +159,3 @@ bool process_key_caches(process_key_cache_t func, void *param)
return res != 0;
}
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-template class I_List_iterator<NAMED_ILINK>;
-#endif
-
diff --git a/sql/lex.h b/sql/lex.h
index 9bf4c439cb6..e1dc6150cc3 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -123,11 +123,10 @@ static SYMBOL symbols[] = {
{ "COLUMN_NAME", SYM(COLUMN_NAME_SYM)},
{ "COLUMNS", SYM(COLUMNS)},
{ "COLUMN_ADD", SYM(COLUMN_ADD_SYM)},
+ { "COLUMN_CHECK", SYM(COLUMN_CHECK_SYM)},
{ "COLUMN_CREATE", SYM(COLUMN_CREATE_SYM)},
{ "COLUMN_DELETE", SYM(COLUMN_DELETE_SYM)},
- { "COLUMN_EXISTS", SYM(COLUMN_EXISTS_SYM)},
{ "COLUMN_GET", SYM(COLUMN_GET_SYM)},
- { "COLUMN_LIST", SYM(COLUMN_LIST_SYM)},
{ "COMMENT", SYM(COMMENT_SYM)},
{ "COMMIT", SYM(COMMIT_SYM)},
{ "COMMITTED", SYM(COMMITTED_SYM)},
@@ -514,6 +513,7 @@ static SYMBOL symbols[] = {
{ "SIGNED", SYM(SIGNED_SYM)},
{ "SIMPLE", SYM(SIMPLE_SYM)},
{ "SLAVE", SYM(SLAVE)},
+ { "SLAVES", SYM(SLAVES)},
{ "SLOW", SYM(SLOW)},
{ "SNAPSHOT", SYM(SNAPSHOT_SYM)},
{ "SMALLINT", SYM(SMALLINT)},
diff --git a/sql/log.cc b/sql/log.cc
index 6c04055cfc1..0444499c72b 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -40,6 +40,7 @@
#include "rpl_rli.h"
#include "sql_audit.h"
#include "log_slow.h"
+#include "mysqld.h"
#include <my_dir.h>
#include <stdarg.h>
@@ -53,6 +54,7 @@
#include "rpl_handler.h"
#include "debug_sync.h"
#include "sql_show.h"
+#include "my_pthread.h"
/* max size of the log message */
#define MAX_LOG_BUFFER_SIZE 1024
@@ -106,6 +108,17 @@ static SHOW_VAR binlog_status_vars_detail[]=
{NullS, NullS, SHOW_LONG}
};
+/*
+ Variables for the binlog background thread.
+ Protected by the MYSQL_BIN_LOG::LOCK_binlog_background_thread mutex.
+ */
+static bool binlog_background_thread_started= false;
+static bool binlog_background_thread_stop= false;
+static MYSQL_BIN_LOG::xid_count_per_binlog *
+ binlog_background_thread_queue= NULL;
+
+static bool start_binlog_background_thread();
+
/**
purge logs, master and slave sides both, related error code
@@ -479,6 +492,14 @@ public:
*/
bool using_xa;
my_xid xa_xid;
+ bool need_unlog;
+ /*
+ Id of binlog that transaction was written to; only needed if need_unlog is
+ true.
+ */
+ ulong binlog_id;
+ /* Set if we get an error during commit that must be returned from unlog(). */
+ bool delayed_error;
private:
@@ -1660,6 +1681,20 @@ binlog_flush_cache(THD *thd, binlog_cache_mngr *cache_mngr,
end_ev, all,
using_stmt, using_trx);
}
+ else
+ {
+ /*
+ This can happen in row-format binlog with something like
+ BEGIN; INSERT INTO nontrans_table; INSERT IGNORE INTO trans_table;
+ The nontrans_table is written directly into the binlog before commit,
+ and if the trans_table is ignored there will be no rows to write when
+ we get here.
+
+ So there is no work to do. Therefore, we will not increment any XID
+ count, so we must not decrement any XID count in unlog().
+ */
+ cache_mngr->need_unlog= 0;
+ }
cache_mngr->reset(using_stmt, using_trx);
DBUG_ASSERT((!using_stmt || cache_mngr->stmt_cache.empty()) &&
@@ -2886,15 +2921,16 @@ const char *MYSQL_LOG::generate_name(const char *log_name,
MYSQL_BIN_LOG::MYSQL_BIN_LOG(uint *sync_period)
- :bytes_written(0), prepared_xids(0), file_id(1), open_count(1),
- need_start_event(TRUE),
+ :reset_master_pending(false),
+ bytes_written(0), file_id(1), open_count(1),
group_commit_queue(0), group_commit_queue_busy(FALSE),
num_commits(0), num_group_commits(0),
sync_period_ptr(sync_period), sync_counter(0),
is_relay_log(0), signal_cnt(0),
checksum_alg_reset(BINLOG_CHECKSUM_ALG_UNDEF),
relay_log_checksum_alg(BINLOG_CHECKSUM_ALG_UNDEF),
- description_event_for_exec(0), description_event_for_queue(0)
+ description_event_for_exec(0), description_event_for_queue(0),
+ current_binlog_id(0)
{
/*
We don't want to initialize locks here as such initialization depends on
@@ -2914,23 +2950,55 @@ void MYSQL_BIN_LOG::cleanup()
DBUG_ENTER("cleanup");
if (inited)
{
+ xid_count_per_binlog *b;
+
inited= 0;
close(LOG_CLOSE_INDEX|LOG_CLOSE_STOP_EVENT);
delete description_event_for_queue;
delete description_event_for_exec;
+
+ while ((b= binlog_xid_count_list.get()))
+ {
+ /*
+ There should be no pending XIDs at shutdown, and only one entry (for
+ the active binlog file) in the list.
+ */
+ DBUG_ASSERT(b->xid_count == 0);
+ DBUG_ASSERT(!binlog_xid_count_list.head());
+ my_free(b);
+ }
+
+ /* Wait for the binlog background thread to stop. */
+ if (!is_relay_log && binlog_background_thread_started)
+ {
+ mysql_mutex_lock(&LOCK_binlog_background_thread);
+ binlog_background_thread_stop= true;
+ mysql_cond_signal(&COND_binlog_background_thread);
+ while (binlog_background_thread_stop)
+ mysql_cond_wait(&COND_binlog_background_thread_end,
+ &LOCK_binlog_background_thread);
+ mysql_mutex_unlock(&LOCK_binlog_background_thread);
+ binlog_background_thread_started= false;
+ }
+
mysql_mutex_destroy(&LOCK_log);
mysql_mutex_destroy(&LOCK_index);
+ mysql_mutex_destroy(&LOCK_xid_list);
+ mysql_mutex_destroy(&LOCK_binlog_background_thread);
mysql_cond_destroy(&update_cond);
+ mysql_cond_destroy(&COND_queue_busy);
+ mysql_cond_destroy(&COND_xid_list);
+ mysql_cond_destroy(&COND_binlog_background_thread);
+ mysql_cond_destroy(&COND_binlog_background_thread_end);
}
DBUG_VOID_RETURN;
}
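The shutdown sequence above is a standard condition-variable handshake: set a stop flag, signal the background thread, then wait until the thread clears the flag (signalling COND_binlog_background_thread_end) to confirm it has exited. The same protocol in portable C++11, with illustrative names:

    #include <condition_variable>
    #include <mutex>

    std::mutex lock;
    std::condition_variable cond_worker, cond_done;
    bool stop= false;

    void request_stop_and_wait()
    {
      std::unique_lock<std::mutex> guard(lock);
      stop= true;
      cond_worker.notify_one();                    // wake the background thread
      cond_done.wait(guard, []{ return !stop; });  // worker resets stop before exiting
    }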
/* Init binlog-specific vars */
-void MYSQL_BIN_LOG::init(bool no_auto_events_arg, ulong max_size_arg)
+void MYSQL_BIN_LOG::init(ulong max_size_arg)
{
DBUG_ENTER("MYSQL_BIN_LOG::init");
- no_auto_events= no_auto_events_arg;
max_size= max_size_arg;
DBUG_PRINT("info",("max_size: %lu", max_size));
DBUG_VOID_RETURN;
@@ -2942,8 +3010,18 @@ void MYSQL_BIN_LOG::init_pthread_objects()
MYSQL_LOG::init_pthread_objects();
mysql_mutex_init(m_key_LOCK_index, &LOCK_index, MY_MUTEX_INIT_SLOW);
mysql_mutex_setflags(&LOCK_index, MYF_NO_DEADLOCK_DETECTION);
+ mysql_mutex_init(key_BINLOG_LOCK_xid_list,
+ &LOCK_xid_list, MY_MUTEX_INIT_FAST);
mysql_cond_init(m_key_update_cond, &update_cond, 0);
mysql_cond_init(m_key_COND_queue_busy, &COND_queue_busy, 0);
+ mysql_cond_init(key_BINLOG_COND_xid_list, &COND_xid_list, 0);
+
+ mysql_mutex_init(key_BINLOG_LOCK_binlog_background_thread,
+ &LOCK_binlog_background_thread, MY_MUTEX_INIT_FAST);
+ mysql_cond_init(key_BINLOG_COND_binlog_background_thread,
+ &COND_binlog_background_thread, 0);
+ mysql_cond_init(key_BINLOG_COND_binlog_background_thread_end,
+ &COND_binlog_background_thread_end, 0);
}
@@ -3031,16 +3109,20 @@ bool MYSQL_BIN_LOG::open(const char *log_name,
enum_log_type log_type_arg,
const char *new_name,
enum cache_type io_cache_type_arg,
- bool no_auto_events_arg,
ulong max_size_arg,
bool null_created_arg,
bool need_mutex)
{
File file= -1;
+ xid_count_per_binlog *new_xid_list_entry= NULL, *b;
DBUG_ENTER("MYSQL_BIN_LOG::open");
DBUG_PRINT("enter",("log_type: %d",(int) log_type_arg));
+ if (!is_relay_log && !binlog_background_thread_started &&
+ start_binlog_background_thread())
+ DBUG_RETURN(1);
+
if (init_and_set_log_file_name(log_name, new_name, log_type_arg,
io_cache_type_arg))
{
@@ -3092,7 +3174,7 @@ bool MYSQL_BIN_LOG::open(const char *log_name,
DBUG_RETURN(1); /* all warnings issued */
}
- init(no_auto_events_arg, max_size_arg);
+ init(max_size_arg);
open_count++;
@@ -3116,11 +3198,10 @@ bool MYSQL_BIN_LOG::open(const char *log_name,
write_file_name_to_index_file= 1;
}
- if (need_start_event && !no_auto_events)
{
/*
- In 4.x we set need_start_event=0 here, but in 5.0 we want a Start event
- even if this is not the very first binlog.
+ In 4.x we put a Start event only in the first binlog. But from 5.0 we
+ want a Start event even if this is not the very first binlog.
*/
Format_description_log_event s(BINLOG_VERSION);
/*
@@ -3147,6 +3228,60 @@ bool MYSQL_BIN_LOG::open(const char *log_name,
if (s.write(&log_file))
goto err;
bytes_written+= s.data_written;
+
+ if (!is_relay_log)
+ {
+ char buf[FN_REFLEN];
+ /*
+ Construct an entry in the binlog_xid_count_list for the new binlog
+ file (we will not link it into the list until we know the new file
+ is successfully created; otherwise we would have to remove it again
+ if creation failed, which gets tricky since other threads may have
+ seen the entry in the meantime - and we do not want to hold
+ LOCK_xid_list for long periods of time).
+
+ Write the current binlog checkpoint into the log, so XA recovery will
+ know from where to start recovery.
+ */
+ uint off= dirname_length(log_file_name);
+ uint len= strlen(log_file_name) - off;
+ char *entry_mem, *name_mem;
+ if (!(new_xid_list_entry = (xid_count_per_binlog *)
+ my_multi_malloc(MYF(MY_WME),
+ &entry_mem, sizeof(xid_count_per_binlog),
+ &name_mem, len,
+ NULL)))
+ goto err;
+ memcpy(name_mem, log_file_name+off, len);
+ new_xid_list_entry->binlog_name= name_mem;
+ new_xid_list_entry->binlog_name_len= len;
+ new_xid_list_entry->xid_count= 0;
+
+ /*
+ Find the name for the Initial binlog checkpoint.
+
+ Normally this will just be the first entry, as we delete entries
+ when their count drops to zero. But we scan the list to handle any
+ corner case, eg. for the first binlog file opened after startup, the
+ list will be empty.
+ */
+ mysql_mutex_lock(&LOCK_xid_list);
+ I_List_iterator<xid_count_per_binlog> it(binlog_xid_count_list);
+ while ((b= it++) && b->xid_count == 0)
+ ;
+ mysql_mutex_unlock(&LOCK_xid_list);
+ if (!b)
+ b= new_xid_list_entry;
+ strmake(buf, b->binlog_name, b->binlog_name_len);
+ Binlog_checkpoint_log_event ev(buf, len);
+ DBUG_EXECUTE_IF("crash_before_write_checkpoint_event",
+ flush_io_cache(&log_file);
+ mysql_file_sync(log_file.file, MYF(MY_WME));
+ DBUG_SUICIDE(););
+ if (ev.write(&log_file))
+ goto err;
+ bytes_written+= ev.data_written;
+ }
}
if (description_event_for_queue &&
description_event_for_queue->binlog_version>=4)
@@ -3218,6 +3353,23 @@ bool MYSQL_BIN_LOG::open(const char *log_name,
#endif
}
}
+
+ if (!is_relay_log)
+ {
+ /*
+ Now the file was created successfully, so we can link in the entry for
+ the new binlog file in binlog_xid_count_list.
+ */
+ mysql_mutex_lock(&LOCK_xid_list);
+ ++current_binlog_id;
+ new_xid_list_entry->binlog_id= current_binlog_id;
+ /* Remove any initial entries with no pending XIDs. */
+ while ((b= binlog_xid_count_list.head()) && b->xid_count == 0)
+ my_free(binlog_xid_count_list.get());
+ binlog_xid_count_list.push_back(new_xid_list_entry);
+ mysql_mutex_unlock(&LOCK_xid_list);
+ }
+
log_state= LOG_OPENED;
#ifdef HAVE_REPLICATION
@@ -3236,6 +3388,8 @@ err:
Turning logging off for the whole duration of the MySQL server process. \
To turn it on again: fix the cause, \
shutdown the MySQL server and restart it.", name, errno);
+ if (new_xid_list_entry)
+ my_free(new_xid_list_entry);
if (file >= 0)
mysql_file_close(file, MYF(0));
close(LOG_CLOSE_INDEX);
@@ -3481,11 +3635,11 @@ err:
/**
Delete all logs referred to in the index file.
- Start writing to a new log file.
The new index file will only contain this file.
- @param thd Thread
+ @param thd Thread
+ @param create_new_log 1 if we should start writing to a new log file
@note
If not called from slave thread, write start event to new log
@@ -3496,7 +3650,7 @@ err:
1 error
*/
-bool MYSQL_BIN_LOG::reset_logs(THD* thd)
+bool MYSQL_BIN_LOG::reset_logs(THD* thd, bool create_new_log)
{
LOG_INFO linfo;
bool error=0;
@@ -3512,6 +3666,67 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd)
mysql_mutex_lock(&LOCK_log);
mysql_mutex_lock(&LOCK_index);
+ if (!is_relay_log)
+ {
+ /*
+ Mark that a RESET MASTER is in progress.
+ This ensures that a binlog checkpoint will not try to write binlog
+ checkpoint events, which would be useless (as we are deleting the binlog
+ anyway) and could deadlock, as we are holding LOCK_log.
+ */
+ mysql_mutex_lock(&LOCK_xid_list);
+ reset_master_pending= true;
+ mysql_mutex_unlock(&LOCK_xid_list);
+
+ /*
+ We are going to nuke all binary log files.
+ Without binlog, we cannot XA recover prepared-but-not-committed
+ transactions in engines. So force a commit checkpoint first.
+
+ Note that we take and immediately release LOCK_commit_ordered. This has
+ the effect to ensure that any on-going group commit (in
+ trx_group_commit_leader()) has completed before we request the checkpoint,
+ due to the chaining of LOCK_log and LOCK_commit_ordered in that function.
+ (We are holding LOCK_log, so no new group commit can start).
+
+ Without this, it is possible (though perhaps unlikely) that the RESET
+ MASTER could run in-between the write to the binlog and the
+ commit_ordered() in the engine of some transaction, and then a crash
+ later would leave such transaction not recoverable.
+ */
+ mysql_mutex_lock(&LOCK_commit_ordered);
+ mysql_mutex_unlock(&LOCK_commit_ordered);
+
+ mark_xids_active(current_binlog_id, 1);
+ do_checkpoint_request(current_binlog_id);
+
+ /* Now wait for all checkpoint requests and pending unlog() to complete. */
+ mysql_mutex_lock(&LOCK_xid_list);
+ xid_count_per_binlog *b;
+ for (;;)
+ {
+ I_List_iterator<xid_count_per_binlog> it(binlog_xid_count_list);
+ while ((b= it++))
+ {
+ if (b->xid_count > 0)
+ break;
+ }
+ if (!b)
+ break; /* No more pending XIDs */
+ /*
+ Wait until signalled that one more binlog dropped to zero, then check
+ again.
+ */
+ mysql_cond_wait(&COND_xid_list, &LOCK_xid_list);
+ }
+
+ /*
+ Now all XIDs are fully flushed to disk, and we are holding LOCK_log so
+ no new ones will be written. So we can proceed to delete the logs.
+ */
+ mysql_mutex_unlock(&LOCK_xid_list);
+ }
+
/*
The following mutex is needed to ensure that no threads call
'delete thd' as we would then risk missing a 'rollback' from this
@@ -3602,10 +3817,8 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd)
goto err;
}
}
- if (!thd->slave_thread)
- need_start_event=1;
- if (!open_index_file(index_file_name, 0, FALSE))
- if ((error= open(save_name, log_type, 0, io_cache_type, no_auto_events, max_size, 0, FALSE)))
+ if (create_new_log && !open_index_file(index_file_name, 0, FALSE))
+ if ((error= open(save_name, log_type, 0, io_cache_type, max_size, 0, FALSE)))
goto err;
my_free((void *) save_name);
@@ -3613,6 +3826,31 @@ err:
if (error == 1)
name= const_cast<char*>(save_name);
mysql_mutex_unlock(&LOCK_thread_count);
+
+ if (!is_relay_log)
+ {
+ xid_count_per_binlog *b;
+ /*
+ Remove all entries in the xid_count list except the last.
+ Normally we will just be deleting all the entries that we waited for to
+ drop to zero above. But if we fail during RESET MASTER for some reason
+ then we will not have created any new log file, and we may keep the last
+ of the old entries.
+ */
+ mysql_mutex_lock(&LOCK_xid_list);
+ for (;;)
+ {
+ b= binlog_xid_count_list.head();
+ DBUG_ASSERT(b /* List can never become empty. */);
+ if (b->binlog_id == current_binlog_id)
+ break;
+ DBUG_ASSERT(b->xid_count == 0);
+ my_free(binlog_xid_count_list.get());
+ }
+ reset_master_pending= false;
+ mysql_mutex_unlock(&LOCK_xid_list);
+ }
+
mysql_mutex_unlock(&LOCK_index);
mysql_mutex_unlock(&LOCK_log);
DBUG_RETURN(error);
@@ -3665,7 +3903,7 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included)
DBUG_ENTER("purge_first_log");
DBUG_ASSERT(is_open());
- DBUG_ASSERT(rli->slave_running == 1);
+ DBUG_ASSERT(rli->slave_running == MYSQL_SLAVE_RUN_NOT_CONNECT);
DBUG_ASSERT(!strcmp(rli->linfo.log_file_name,rli->event_relay_log_name));
mysql_mutex_lock(&LOCK_index);
@@ -3822,8 +4060,7 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log,
if ((error=find_log_pos(&log_info, NullS, 0 /*no mutex*/)))
goto err;
while ((strcmp(to_log,log_info.log_file_name) || (exit_loop=included)) &&
- !is_active(log_info.log_file_name) &&
- !log_in_use(log_info.log_file_name))
+ can_purge_log(log_info.log_file_name))
{
if ((error= register_purge_index_entry(log_info.log_file_name)))
{
@@ -4173,8 +4410,7 @@ int MYSQL_BIN_LOG::purge_logs_before_date(time_t purge_time)
goto err;
while (strcmp(log_file_name, log_info.log_file_name) &&
- !is_active(log_info.log_file_name) &&
- !log_in_use(log_info.log_file_name))
+ can_purge_log(log_info.log_file_name))
{
if (!mysql_file_stat(m_key_file_log,
log_info.log_file_name, &stat_area, MYF(0)))
@@ -4229,6 +4465,28 @@ err:
mysql_mutex_unlock(&LOCK_index);
DBUG_RETURN(error);
}
+
+
+bool
+MYSQL_BIN_LOG::can_purge_log(const char *log_file_name)
+{
+ xid_count_per_binlog *b;
+
+ if (is_active(log_file_name))
+ return false;
+ mysql_mutex_lock(&LOCK_xid_list);
+ {
+ I_List_iterator<xid_count_per_binlog> it(binlog_xid_count_list);
+ while ((b= it++) &&
+ 0 != strncmp(log_file_name+dirname_length(log_file_name),
+ b->binlog_name, b->binlog_name_len))
+ ;
+ }
+ mysql_mutex_unlock(&LOCK_xid_list);
+ if (b)
+ return false;
+ return !log_in_use(log_file_name);
+}
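can_purge_log() above permits deletion only when the file is not the active binlog, appears in no xid_count_per_binlog entry (so XA recovery can no longer need it), and is not in use by a reader. The same three-gate check in a standalone sketch with illustrative types:

    #include <list>
    #include <string>

    struct XidEntry { std::string binlog_name; long xid_count; };

    bool can_purge(const std::string &file, const std::string &active,
                   const std::list<XidEntry> &pending, bool in_use)
    {
      if (file == active)
        return false;
      for (std::list<XidEntry>::const_iterator it= pending.begin();
           it != pending.end(); ++it)
        if (it->binlog_name == file)
          return false;               // still listed: XA recovery may need it
      return !in_use;
    }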
#endif /* HAVE_REPLICATION */
@@ -4322,26 +4580,6 @@ int MYSQL_BIN_LOG::new_file_impl(bool need_lock)
mysql_mutex_assert_owner(&LOCK_log);
mysql_mutex_assert_owner(&LOCK_index);
- /*
- if binlog is used as tc log, be sure all xids are "unlogged",
- so that on recover we only need to scan one - latest - binlog file
- for prepared xids. As this is expected to be a rare event,
- simple wait strategy is enough. We're locking LOCK_log to be sure no
- new Xid_log_event's are added to the log (and prepared_xids is not
- increased), and waiting on COND_prep_xids for late threads to
- catch up.
- */
- if (prepared_xids)
- {
- tc_log_page_waits++;
- mysql_mutex_lock(&LOCK_prep_xids);
- while (prepared_xids) {
- DBUG_PRINT("info", ("prepared_xids=%lu", prepared_xids));
- mysql_cond_wait(&COND_prep_xids, &LOCK_prep_xids);
- }
- mysql_mutex_unlock(&LOCK_prep_xids);
- }
-
/* Reuse old name if not binlog and not update log */
new_name_ptr= name;
@@ -4356,7 +4594,6 @@ int MYSQL_BIN_LOG::new_file_impl(bool need_lock)
if (log_type == LOG_BIN)
{
- if (!no_auto_events)
{
/*
We log the whole file name for log file as the user may decide
@@ -4431,7 +4668,7 @@ int MYSQL_BIN_LOG::new_file_impl(bool need_lock)
/* reopen the binary log file. */
file_to_open= new_name_ptr;
error= open(old_name, log_type, new_name_ptr, io_cache_type,
- no_auto_events, max_size, 1, FALSE);
+ max_size, 1, FALSE);
}
/* handle reopening errors */
@@ -4503,7 +4740,7 @@ bool MYSQL_BIN_LOG::append(Log_event* ev)
DBUG_PRINT("info",("max_size: %lu",max_size));
if (flush_and_sync(0))
goto err;
- if ((uint) my_b_append_tell(&log_file) > max_size)
+ if (my_b_append_tell(&log_file) > max_size)
error= new_file_without_locking();
err:
mysql_mutex_unlock(&LOCK_log);
@@ -4534,7 +4771,7 @@ bool MYSQL_BIN_LOG::appendv(const char* buf, uint len,...)
DBUG_PRINT("info",("max_size: %lu",max_size));
if (flush_and_sync(0))
goto err;
- if ((uint) my_b_append_tell(&log_file) > max_size)
+ if (my_b_append_tell(&log_file) > max_size)
error= new_file_without_locking();
err:
if (!error)
@@ -5056,6 +5293,8 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info, my_bool *with_annotate)
bool is_trans_cache= FALSE;
bool using_trans= event_info->use_trans_cache();
bool direct= event_info->use_direct_logging();
+ ulong prev_binlog_id;
+ LINT_INIT(prev_binlog_id);
if (thd->binlog_evt_union.do_union)
{
@@ -5107,6 +5346,7 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info, my_bool *with_annotate)
file= &log_file;
my_org_b_tell= my_b_tell(file);
mysql_mutex_lock(&LOCK_log);
+ prev_binlog_id= current_binlog_id;
}
else
{
@@ -5250,7 +5490,7 @@ err:
mysql_mutex_unlock(&LOCK_log);
if (check_purge)
- purge();
+ checkpoint_and_purge(prev_binlog_id);
}
if (error)
@@ -5335,6 +5575,60 @@ bool general_log_write(THD *thd, enum enum_server_command command,
return FALSE;
}
+
+static void
+binlog_checkpoint_callback(void *cookie)
+{
+ MYSQL_BIN_LOG::xid_count_per_binlog *entry=
+ (MYSQL_BIN_LOG::xid_count_per_binlog *)cookie;
+ /*
+ For every supporting engine, we increment the xid_count and issue a
+ commit_checkpoint_request(). Then we can count when all
+ commit_checkpoint_notify() callbacks have occurred, and then log a new
+ binlog checkpoint event.
+ */
+ mysql_bin_log.mark_xids_active(entry->binlog_id, 1);
+}
+
+
+/*
+ Request a commit checkpoint from each supporting engine.
+ This must be called after each binlog rotate, and after LOCK_log has been
+ released. The xid_count value in the xid_count_per_binlog entry was
+ incremented by 1 and will be decremented in this function; this ensures
+ that the entry will not go away early despite LOCK_log not being held.
+*/
+void
+MYSQL_BIN_LOG::do_checkpoint_request(ulong binlog_id)
+{
+ xid_count_per_binlog *entry;
+
+ /*
+ Find the binlog entry, and invoke commit_checkpoint_request() on it in
+ each supporting storage engine.
+ */
+ mysql_mutex_lock(&LOCK_xid_list);
+ I_List_iterator<xid_count_per_binlog> it(binlog_xid_count_list);
+ do {
+ entry= it++;
+ DBUG_ASSERT(entry /* binlog_id is always somewhere in the list. */);
+ } while (entry->binlog_id != binlog_id);
+ mysql_mutex_unlock(&LOCK_xid_list);
+
+ ha_commit_checkpoint_request(entry, binlog_checkpoint_callback);
+ /*
+ When we rotated the binlog, we incremented xid_count to make sure the
+ entry would not go away until this point, where we have done all necessary
+ commit_checkpoint_request() calls.
+ So now we can (and must) decrease the count - when it reaches zero, we
+ will know that both all pending unlog() and all pending
+ commit_checkpoint_notify() calls are done, and we can log a new binlog
+ checkpoint.
+ */
+ mark_xid_done(binlog_id, true);
+}
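The comment above describes a pinning protocol: the rotate path increments xid_count before LOCK_log is released, and do_checkpoint_request() decrements it once all commit_checkpoint_request() calls have been issued; whoever drops the count to zero logs the checkpoint. A reduced sketch of that reference-count discipline (the server signals COND_xid_list rather than invoking a callback):

    #include <atomic>

    struct BinlogEntry { std::atomic<long> xid_count; };

    void pin(BinlogEntry *e) { e->xid_count.fetch_add(1); }

    void unpin(BinlogEntry *e, void (*on_zero)(BinlogEntry *))
    {
      if (e->xid_count.fetch_sub(1) == 1)   // we released the last reference
        on_zero(e);                         // safe to log the checkpoint now
    }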
+
+
/**
The method executes rotation when LOCK_log is already acquired
by the caller.
@@ -5343,6 +5637,15 @@ bool general_log_write(THD *thd, enum enum_server_command command,
@param check_purge is set to true if rotation took place
@note
+ Caller _must_ check the check_purge variable. If this is set, it means
+ that the binlog was rotated, and caller _must_ ensure that
+ do_checkpoint_request() is called later with the binlog_id of the rotated
+ binlog file. The call to do_checkpoint_request() must happen after
+ LOCK_log is released (which is why we cannot simply do it here).
+ Usually, checkpoint_and_purge() is appropriate, as it will both handle
+ the checkpointing and any needed purging of old logs.
+
+ @note
If rotation fails, for instance the server was unable
to create a new log file, we still try to write an
incident event to the current log.
@@ -5360,7 +5663,27 @@ int MYSQL_BIN_LOG::rotate(bool force_rotate, bool* check_purge)
if (force_rotate || (my_b_tell(&log_file) >= (my_off_t) max_size))
{
+ ulong binlog_id= current_binlog_id;
+ /*
+ We rotate the binlog, so we need to start a commit checkpoint in all
+ supporting engines - when it finishes, we can log a new binlog checkpoint
+ event.
+
+ But we cannot start the checkpoint here - there could be a group commit
+ still in progress which needs to be included in the checkpoint, and
+ besides we do not want to do the (possibly expensive) checkpoint while
+ LOCK_log is held.
+
+ On the other hand, we must be sure that the xid_count entry for the
+ previous log does not go away until we start the checkpoint - which it
+ could do as it is no longer the most recent. So we increment xid_count
+ (to count the pending checkpoint request) - this pins the entry in
+ place until we decrement again in do_checkpoint_request().
+ */
+ mark_xids_active(binlog_id, 1);
+
if ((error= new_file_without_locking()))
+ {
/**
Be conservative... There are possible lost events (eg,
failing to log the Execute_load_query_log_event
@@ -5373,7 +5696,14 @@ int MYSQL_BIN_LOG::rotate(bool force_rotate, bool* check_purge)
if (!write_incident_already_locked(current_thd))
flush_and_sync(0);
- *check_purge= true;
+ /*
+ We failed to rotate - so we have to decrement the xid_count back that
+ we incremented before attempting the rotate.
+ */
+ mark_xid_done(binlog_id, false);
+ }
+ else
+ *check_purge= true;
}
DBUG_RETURN(error);
}
@@ -5401,6 +5731,13 @@ void MYSQL_BIN_LOG::purge()
#endif
}
+
+void MYSQL_BIN_LOG::checkpoint_and_purge(ulong binlog_id)
+{
+ do_checkpoint_request(binlog_id);
+ purge();
+}
+
/**
The method is a shortcut of @c rotate() and @c purge().
LOCK_log is acquired prior to rotate and is released after it.
@@ -5413,11 +5750,13 @@ void MYSQL_BIN_LOG::purge()
int MYSQL_BIN_LOG::rotate_and_purge(bool force_rotate)
{
int error= 0;
+ ulong prev_binlog_id;
DBUG_ENTER("MYSQL_BIN_LOG::rotate_and_purge");
bool check_purge= false;
//todo: fix the macro def and restore safe_mutex_assert_not_owner(&LOCK_log);
mysql_mutex_lock(&LOCK_log);
+ prev_binlog_id= current_binlog_id;
if ((error= rotate(force_rotate, &check_purge)))
check_purge= false;
/*
@@ -5427,7 +5766,7 @@ int MYSQL_BIN_LOG::rotate_and_purge(bool force_rotate)
mysql_mutex_unlock(&LOCK_log);
if (check_purge)
- purge();
+ checkpoint_and_purge(prev_binlog_id);
DBUG_RETURN(error);
}
@@ -5758,11 +6097,13 @@ bool MYSQL_BIN_LOG::write_incident(THD *thd)
uint error= 0;
my_off_t offset;
bool check_purge= false;
+ ulong prev_binlog_id;
DBUG_ENTER("MYSQL_BIN_LOG::write_incident");
mysql_mutex_lock(&LOCK_log);
if (likely(is_open()))
{
+ prev_binlog_id= current_binlog_id;
if (!(error= write_incident_already_locked(thd)) &&
!(error= flush_and_sync(0)))
{
@@ -5782,12 +6123,51 @@ bool MYSQL_BIN_LOG::write_incident(THD *thd)
mysql_mutex_unlock(&LOCK_log);
if (check_purge)
- purge();
+ checkpoint_and_purge(prev_binlog_id);
}
DBUG_RETURN(error);
}
+void
+MYSQL_BIN_LOG::write_binlog_checkpoint_event_already_locked(const char *name,
+ uint len)
+{
+ my_off_t offset;
+ Binlog_checkpoint_log_event ev(name, len);
+ /*
+ Note that we must sync the binlog checkpoint to disk.
+ Otherwise a subsequent log purge could delete binlogs that XA recovery
+ thinks are needed (even though they are not really).
+ */
+ if (!ev.write(&log_file) && !flush_and_sync(0))
+ {
+ signal_update();
+ }
+ else
+ {
+ /*
+ If we fail to write the checkpoint event, something is probably really
+ bad with the binlog. We complain in the error log.
+
+ Note that failure to write the binlog checkpoint does not compromise the
+ ability to do crash recovery - crash recovery will just have to scan a
+ bit more of the binlog than strictly necessary.
+ */
+ sql_print_error("Failed to write binlog checkpoint event to binary log\n");
+ }
+
+ offset= my_b_tell(&log_file);
+ /*
+ Take mutex to protect against a reader seeing partial writes of 64-bit
+ offset on 32-bit CPUs.
+ */
+ mysql_mutex_lock(&LOCK_commit_ordered);
+ last_commit_pos_offset= offset;
+ mysql_mutex_unlock(&LOCK_commit_ordered);
+}
+
+
/**
Write a cached log entry to the binary log.
- To support transaction over replication, we wrap the transaction
@@ -5820,6 +6200,7 @@ MYSQL_BIN_LOG::write_transaction_to_binlog(THD *thd,
bool using_trx_cache)
{
group_commit_entry entry;
+ Ha_trx_info *ha_info;
DBUG_ENTER("MYSQL_BIN_LOG::write_transaction_to_binlog");
entry.thd= thd;
@@ -5828,6 +6209,15 @@ MYSQL_BIN_LOG::write_transaction_to_binlog(THD *thd,
entry.all= all;
entry.using_stmt_cache= using_stmt_cache;
entry.using_trx_cache= using_trx_cache;
+ entry.need_unlog= false;
+ ha_info= all ? thd->transaction.all.ha_list : thd->transaction.stmt.ha_list;
+ for (; ha_info; ha_info= ha_info->next())
+ {
+ if (ha_info->is_started() && ha_info->ht() != binlog_hton &&
+ !ha_info->ht()->commit_checkpoint_request)
+ entry.need_unlog= true;
+ break;
+ }
/*
Log "BEGIN" at the beginning of every transaction. Here, a transaction is
@@ -5916,6 +6306,18 @@ MYSQL_BIN_LOG::write_transaction_to_binlog_events(group_commit_entry *entry)
{
next->thd->signal_wakeup_ready();
}
+ else
+ {
+ /*
+ If we rotated the binlog, and if we are using the unoptimized thread
+ scheduling where every thread runs its own commit_ordered(), then we
+ must do the commit checkpoint and log purge here, after all
+ commit_ordered() calls have finished, and locks have been released.
+ */
+ if (entry->check_purge)
+ checkpoint_and_purge(entry->binlog_id);
+ }
+
}
if (likely(!entry->error))
@@ -5946,8 +6348,9 @@ MYSQL_BIN_LOG::write_transaction_to_binlog_events(group_commit_entry *entry)
we need to mark it as not needed for recovery (unlog() is not called
for a transaction if log_xid() fails).
*/
- if (entry->cache_mngr->using_xa && entry->cache_mngr->xa_xid)
- mark_xid_done();
+ if (entry->cache_mngr->using_xa && entry->cache_mngr->xa_xid &&
+ entry->cache_mngr->need_unlog)
+ mark_xid_done(entry->cache_mngr->binlog_id, true);
return 1;
}
@@ -5967,14 +6370,13 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
{
uint xid_count= 0;
my_off_t UNINIT_VAR(commit_offset);
- group_commit_entry *current;
- group_commit_entry *last_in_queue;
+ group_commit_entry *current, *last_in_queue;
group_commit_entry *queue= NULL;
bool check_purge= false;
+ ulong binlog_id;
DBUG_ENTER("MYSQL_BIN_LOG::trx_group_commit_leader");
+ LINT_INIT(binlog_id);
- DBUG_ASSERT(is_open());
- if (likely(is_open())) // Should always be true
{
/*
Lock the LOCK_log(), and once we get it, collect any additional writes
@@ -5982,6 +6384,7 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
*/
mysql_mutex_lock(&LOCK_log);
DEBUG_SYNC(leader->thd, "commit_after_get_LOCK_log");
+ binlog_id= current_binlog_id;
mysql_mutex_lock(&LOCK_prepare_ordered);
current= group_commit_queue;
@@ -6000,7 +6403,11 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
DBUG_ASSERT(leader == queue /* the leader should be first in queue */);
/* Now we have in queue the list of transactions to be committed in order. */
+ }
+ DBUG_ASSERT(is_open());
+ if (likely(is_open())) // Should always be true
+ {
/*
Commit every transaction in the queue.
@@ -6028,7 +6435,24 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
commit_offset= my_b_write_tell(&log_file);
cache_mngr->last_commit_pos_offset= commit_offset;
if (cache_mngr->using_xa && cache_mngr->xa_xid)
- xid_count++;
+ {
+ /*
+ If all storage engines support commit_checkpoint_request(), then we
+ do not need to keep track of when this XID is durably committed.
+ Instead we will just ask the storage engine to durably commit all its
+ XIDs when we rotate a binlog file.
+ */
+ if (current->need_unlog)
+ {
+ xid_count++;
+ cache_mngr->need_unlog= true;
+ cache_mngr->binlog_id= binlog_id;
+ }
+ else
+ cache_mngr->need_unlog= false;
+
+ cache_mngr->delayed_error= false;
+ }
}
bool synced= 0;
@@ -6071,30 +6495,34 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
}
/*
- if any commit_events are Xid_log_event, increase the number of
- prepared_xids (it's decreased in ::unlog()). Binlog cannot be rotated
- if there're prepared xids in it - see the comment in new_file() for
- an explanation.
- If no Xid_log_events (then it's all Query_log_event) rotate binlog,
- if necessary.
+ If any commit_events are Xid_log_event, increase the number of pending
+ XIDs in current binlog (it's decreased in ::unlog()). When the count in
+ a (not active) binlog file reaches zero, we know that it is no longer
+ needed in XA recovery, and we can log a new binlog checkpoint event.
*/
if (xid_count > 0)
{
- mark_xids_active(xid_count);
+ mark_xids_active(binlog_id, xid_count);
}
- else
+
+ if (rotate(false, &check_purge))
{
- if (rotate(false, &check_purge))
- {
- /*
- If we fail to rotate, which thread should get the error?
- We give the error to the *last* transaction thread; that seems to
- make the most sense, as it was the last to write to the log.
- */
- last_in_queue->error= ER_ERROR_ON_WRITE;
- last_in_queue->commit_errno= errno;
- check_purge= false;
- }
+ /*
+ If we fail to rotate, which thread should get the error?
+ We give the error to the leader, as any my_error() thrown inside
+ rotate() will have been registered for the leader THD.
+
+ However we must not return error from here - that would cause
+ ha_commit_trans() to abort and rollback the transaction, which would
+ leave an inconsistent state with the transaction committed in the
+ binlog but rolled back in the engine.
+
+ Instead set a flag so that we can return error later, from unlog(),
+ when the transaction has been safely committed in the engine.
+ */
+ leader->cache_mngr->delayed_error= true;
+ my_error(ER_ERROR_ON_WRITE, MYF(ME_NOREFRESH), name, errno);
+ check_purge= false;
}
}
@@ -6109,9 +6537,6 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
*/
mysql_mutex_unlock(&LOCK_log);
- if (check_purge)
- purge();
-
DEBUG_SYNC(leader->thd, "commit_after_release_LOCK_log");
++num_group_commits;
@@ -6129,6 +6554,15 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
mysql_cond_wait(&COND_queue_busy, &LOCK_commit_ordered);
group_commit_queue_busy= TRUE;
+ /*
+ Set these so that the last thread in the queue can run
+ checkpoint_and_purge(). (When using optimized thread scheduling, we run
+ checkpoint_and_purge() in this function instead, so the last thread does
+ not need to and these values need not be set).
+ */
+ last_in_queue->check_purge= check_purge;
+ last_in_queue->binlog_id= binlog_id;
+
/* Note that we return with LOCK_commit_ordered locked! */
DBUG_VOID_RETURN;
}
@@ -6144,7 +6578,8 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
DEBUG_SYNC(leader->thd, "commit_loop_entry_commit_ordered");
++num_commits;
- if (current->cache_mngr->using_xa && !current->error)
+ if (current->cache_mngr->using_xa && !current->error &&
+ DBUG_EVALUATE_IF("skip_commit_ordered", 0, 1))
run_commit_ordered(current->thd, current->all);
/*
@@ -6158,6 +6593,10 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
}
DEBUG_SYNC(leader->thd, "commit_after_group_run_commit_ordered");
mysql_mutex_unlock(&LOCK_commit_ordered);
+ DEBUG_SYNC(leader->thd, "commit_after_group_release_commit_ordered");
+
+ if (check_purge)
+ checkpoint_and_purge(binlog_id);
DBUG_VOID_RETURN;
}
@@ -6317,7 +6756,7 @@ void MYSQL_BIN_LOG::close(uint exiting)
if (log_state == LOG_OPENED)
{
#ifdef HAVE_REPLICATION
- if (log_type == LOG_BIN && !no_auto_events &&
+ if (log_type == LOG_BIN &&
(exiting & LOG_CLOSE_STOP_EVENT))
{
Stop_log_event s;
@@ -6575,16 +7014,33 @@ static void print_buffer_to_file(enum loglevel level, const char *buffer,
time_t skr;
struct tm tm_tmp;
struct tm *start;
+ THD *thd;
+ int tag_length= 0;
+ char tag[NAME_LEN];
DBUG_ENTER("print_buffer_to_file");
DBUG_PRINT("enter",("buffer: %s", buffer));
+ if (mysqld_server_initialized && (thd= current_thd))
+ {
+ if (thd->connection_name.length)
+ {
+ /*
+ Add tag for slaves so that the user can see from which connection
+ the error originates.
+ */
+ tag_length= my_snprintf(tag, sizeof(tag), ER(ER_MASTER_LOG_PREFIX),
+ (int) thd->connection_name.length,
+ thd->connection_name.str);
+ }
+ }
+
mysql_mutex_lock(&LOCK_error_log);
skr= my_time(0);
localtime_r(&skr, &tm_tmp);
start=&tm_tmp;
- fprintf(stderr, "%02d%02d%02d %2d:%02d:%02d [%s] %.*s\n",
+ fprintf(stderr, "%02d%02d%02d %2d:%02d:%02d [%s] %.*s%.*s\n",
start->tm_year % 100,
start->tm_mon+1,
start->tm_mday,
@@ -6593,6 +7049,7 @@ static void print_buffer_to_file(enum loglevel level, const char *buffer,
start->tm_sec,
(level == ERROR_LEVEL ? "ERROR" : level == WARNING_LEVEL ?
"Warning" : "Note"),
+ tag_length, tag,
(int) length, buffer);
fflush(stderr);
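For illustration, the line this produces now looks roughly as below; a minimal
sketch, assuming ER_MASTER_LOG_PREFIX renders the connection name as
"Master 'name': " (the exact tag text comes from the errmsg catalog):

  // Sketch of the tagged error-log line (assumed tag text).
  #include <cstdio>

  int main()
  {
    const char *tag= "Master 'slave1': ";   // built via my_snprintf() above
    const char *msg= "error reading relay log";
    // Mirrors the fprintf() format: YYMMDD hh:mm:ss [LEVEL] tag message
    printf("%02d%02d%02d %2d:%02d:%02d [%s] %s%s\n",
           12, 6, 1, 12, 34, 56, "ERROR", tag, msg);
    return 0;
  }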
@@ -6951,6 +7408,8 @@ int TC_LOG_MMAP::open(const char *opt_name)
mysql_mutex_init(key_LOCK_sync, &LOCK_sync, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_active, &LOCK_active, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_pool, &LOCK_pool, MY_MUTEX_INIT_FAST);
+ mysql_mutex_init(key_LOCK_pending_checkpoint, &LOCK_pending_checkpoint,
+ MY_MUTEX_INIT_FAST);
mysql_cond_init(key_COND_active, &COND_active, 0);
mysql_cond_init(key_COND_pool, &COND_pool, 0);
mysql_cond_init(key_TC_LOG_MMAP_COND_queue_busy, &COND_queue_busy, 0);
@@ -7201,17 +7660,93 @@ int TC_LOG_MMAP::sync()
return err;
}
+static void
+mmap_do_checkpoint_callback(void *data)
+{
+ TC_LOG_MMAP::pending_cookies *pending=
+ static_cast<TC_LOG_MMAP::pending_cookies *>(data);
+ ++pending->pending_count;
+}
+
+int TC_LOG_MMAP::unlog(ulong cookie, my_xid xid)
+{
+ pending_cookies *full_buffer= NULL;
+ DBUG_ASSERT(*(my_xid *)(data+cookie) == xid);
+
+ /*
+ Do not delete the entry immediately, as there may be participating storage
+ engines which implement commit_checkpoint_request(), and thus have not yet
+ flushed the commit durably to disk.
+
+ Instead put it in a queue - and periodically, we will request a checkpoint
+ from all engines and delete a whole batch at once.
+ */
+ mysql_mutex_lock(&LOCK_pending_checkpoint);
+ if (pending_checkpoint == NULL)
+ {
+ uint32 size= sizeof(*pending_checkpoint);
+ if (!(pending_checkpoint=
+ (pending_cookies *)my_malloc(size, MYF(MY_ZEROFILL))))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), size);
+ mysql_mutex_unlock(&LOCK_pending_checkpoint);
+ return 1;
+ }
+ }
+
+ pending_checkpoint->cookies[pending_checkpoint->count++]= cookie;
+ if (pending_checkpoint->count == sizeof(pending_checkpoint->cookies) /
+ sizeof(pending_checkpoint->cookies[0]))
+ {
+ full_buffer= pending_checkpoint;
+ pending_checkpoint= NULL;
+ }
+ mysql_mutex_unlock(&LOCK_pending_checkpoint);
+
+ if (full_buffer)
+ {
+ /*
+ We do an extra increment and notify here - this ensures that
+ things also work if there are no engines at all that support
+ commit_checkpoint_request.
+ */
+ ++full_buffer->pending_count;
+ ha_commit_checkpoint_request(full_buffer, mmap_do_checkpoint_callback);
+ commit_checkpoint_notify(full_buffer);
+ }
+ return 0;
+}
+
+
+void
+TC_LOG_MMAP::commit_checkpoint_notify(void *cookie)
+{
+ uint count;
+ pending_cookies *pending= static_cast<pending_cookies *>(cookie);
+ mysql_mutex_lock(&LOCK_pending_checkpoint);
+ DBUG_ASSERT(pending->pending_count > 0);
+ count= --pending->pending_count;
+ mysql_mutex_unlock(&LOCK_pending_checkpoint);
+ if (count == 0)
+ {
+ uint i;
+ for (i= 0; i < sizeof(pending->cookies)/sizeof(pending->cookies[0]); ++i)
+ delete_entry(pending->cookies[i]);
+ my_free(pending);
+ }
+}
+
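As an aside, the reason for the extra increment in unlog() above can be seen
in a minimal standalone model of the pending_count protocol (a sketch, not
the server code; the mutex is elided):

  #include <cassert>

  struct batch { int pending_count; bool freed; };

  static void notify(batch *b)        // models commit_checkpoint_notify()
  {
    if (--b->pending_count == 0)      // last notifier frees the batch
      b->freed= true;
  }

  int main()
  {
    batch b= { 0, false };
    ++b.pending_count;                // the extra increment in unlog()
    b.pending_count+= 2;              // one per engine that will call back
    notify(&b);                       // unlog()'s own notify: 3 -> 2
    assert(!b.freed);                 // engine callbacks still outstanding
    notify(&b);
    notify(&b);                       // last callback arrives
    assert(b.freed);                  // freed exactly once, at zero
    return 0;
  }

Without the extra increment, a run with zero participating engines would
never receive any notify call and the batch would leak; with it, unlog()
itself always delivers the final notification in that case.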
+
/**
erase xid from the page, update page free space counters/pointers.
cookie points directly to the memory where xid was logged.
*/
-int TC_LOG_MMAP::unlog(ulong cookie, my_xid xid)
+int TC_LOG_MMAP::delete_entry(ulong cookie)
{
PAGE *p=pages+(cookie/tc_log_page_size);
my_xid *x=(my_xid *)(data+cookie);
- DBUG_ASSERT(*x == xid);
DBUG_ASSERT(x >= p->start && x < p->end);
mysql_mutex_lock(&p->lock);
@@ -7235,6 +7770,7 @@ void TC_LOG_MMAP::close()
mysql_mutex_destroy(&LOCK_sync);
mysql_mutex_destroy(&LOCK_active);
mysql_mutex_destroy(&LOCK_pool);
+ mysql_mutex_destroy(&LOCK_pending_checkpoint);
mysql_cond_destroy(&COND_pool);
mysql_cond_destroy(&COND_active);
mysql_cond_destroy(&COND_queue_busy);
@@ -7257,9 +7793,12 @@ void TC_LOG_MMAP::close()
}
if (inited>=5) // cannot do in the switch because of Windows
mysql_file_delete(key_file_tclog, logname, MYF(MY_WME));
+ if (pending_checkpoint)
+ my_free(pending_checkpoint);
inited=0;
}
+
int TC_LOG_MMAP::recover()
{
HASH xids;
@@ -7345,14 +7884,6 @@ int TC_LOG::using_heuristic_recover()
/****** transaction coordinator log for 2pc - binlog() based solution ******/
#define TC_LOG_BINLOG MYSQL_BIN_LOG
-/**
- @todo
- keep in-memory list of prepared transactions
- (add to list in log(), remove on unlog())
- and copy it to the new binlog if rotated
- but let's check the behaviour of tc_log_page_waits first!
-*/
-
int TC_LOG_BINLOG::open(const char *opt_name)
{
LOG_INFO log_info;
@@ -7361,10 +7892,6 @@ int TC_LOG_BINLOG::open(const char *opt_name)
DBUG_ASSERT(total_ha_2pc > 1);
DBUG_ASSERT(opt_name && opt_name[0]);
- mysql_mutex_init(key_BINLOG_LOCK_prep_xids,
- &LOCK_prep_xids, MY_MUTEX_INIT_FAST);
- mysql_cond_init(key_BINLOG_COND_prep_xids, &COND_prep_xids, 0);
-
if (!my_b_inited(&index_file))
{
/* There was a failure to open the index file, can't open the binlog */
@@ -7375,7 +7902,7 @@ int TC_LOG_BINLOG::open(const char *opt_name)
if (using_heuristic_recover())
{
/* generate a new binlog to mask a corrupted one */
- open(opt_name, LOG_BIN, 0, WRITE_CACHE, 0, max_binlog_size, 0, TRUE);
+ open(opt_name, LOG_BIN, 0, WRITE_CACHE, max_binlog_size, 0, TRUE);
cleanup();
return 1;
}
@@ -7423,7 +7950,8 @@ int TC_LOG_BINLOG::open(const char *opt_name)
ev->flags & LOG_EVENT_BINLOG_IN_USE_F)
{
sql_print_information("Recovering after a crash using %s", opt_name);
- error= recover(&log, (Format_description_log_event *)ev);
+ error= recover(&log_info, log_name, &log,
+ (Format_description_log_event *)ev);
}
else
error=0;
@@ -7443,9 +7971,6 @@ err:
/** This is called on shutdown, after ha_panic. */
void TC_LOG_BINLOG::close()
{
- DBUG_ASSERT(prepared_xids==0);
- mysql_mutex_destroy(&LOCK_prep_xids);
- mysql_cond_destroy(&COND_prep_xids);
}
/*
@@ -7470,7 +7995,17 @@ TC_LOG_BINLOG::log_and_order(THD *thd, my_xid xid, bool all,
DEBUG_SYNC(thd, "binlog_after_log_and_order");
- DBUG_RETURN(!err);
+ if (err)
+ DBUG_RETURN(0);
+ /*
+ If using explicit user XA, we will not have XID. We must still return a
+ non-zero cookie (as zero cookie signals error).
+ */
+ if (!xid || !cache_mngr->need_unlog)
+ DBUG_RETURN(BINLOG_COOKIE_DUMMY(cache_mngr->delayed_error));
+ else
+ DBUG_RETURN(BINLOG_COOKIE_MAKE(cache_mngr->binlog_id,
+ cache_mngr->delayed_error));
}
/*
@@ -7485,80 +8020,408 @@ TC_LOG_BINLOG::log_and_order(THD *thd, my_xid xid, bool all,
binary log.
*/
void
-TC_LOG_BINLOG::mark_xids_active(uint xid_count)
+TC_LOG_BINLOG::mark_xids_active(ulong binlog_id, uint xid_count)
{
+ xid_count_per_binlog *b;
+
DBUG_ENTER("TC_LOG_BINLOG::mark_xids_active");
- DBUG_PRINT("info", ("xid_count=%u", xid_count));
- mysql_mutex_lock(&LOCK_prep_xids);
- prepared_xids+= xid_count;
- mysql_mutex_unlock(&LOCK_prep_xids);
+ DBUG_PRINT("info", ("binlog_id=%lu xid_count=%u", binlog_id, xid_count));
+
+ mysql_mutex_lock(&LOCK_xid_list);
+ I_List_iterator<xid_count_per_binlog> it(binlog_xid_count_list);
+ while ((b= it++))
+ {
+ if (b->binlog_id == binlog_id)
+ {
+ b->xid_count += xid_count;
+ break;
+ }
+ }
+ /*
+ As we do not delete elements until their count reaches zero, the element
+ should always be found.
+ */
+ DBUG_ASSERT(b);
+ mysql_mutex_unlock(&LOCK_xid_list);
DBUG_VOID_RETURN;
}
/*
- Once an XID is committed, it is safe to rotate the binary log, as it can no
- longer be needed during crash recovery.
+ Once an XID is committed, it can no longer be needed during crash recovery,
+ as it has been durably recorded on disk as "committed".
This function is called to mark an XID this way. It needs to decrease the
- count of pending XIDs, and signal the log rotator thread when it reaches zero.
+ count of pending XIDs in the corresponding binlog. When the count reaches
+ zero (for an "old" binlog that is not the active one), that binlog file no
+ longer needs to be scanned during crash recovery, so we can log a new binlog
+ checkpoint.
*/
void
-TC_LOG_BINLOG::mark_xid_done()
+TC_LOG_BINLOG::mark_xid_done(ulong binlog_id, bool write_checkpoint)
{
- my_bool send_signal;
+ xid_count_per_binlog *b;
+ bool first;
+ ulong current;
DBUG_ENTER("TC_LOG_BINLOG::mark_xid_done");
- mysql_mutex_lock(&LOCK_prep_xids);
- // prepared_xids can be 0 if the transaction had ignorable errors.
- DBUG_ASSERT(prepared_xids >= 0);
- if (prepared_xids > 0)
- prepared_xids--;
- send_signal= (prepared_xids == 0);
- mysql_mutex_unlock(&LOCK_prep_xids);
- if (send_signal) {
- DBUG_PRINT("info", ("prepared_xids=%lu", prepared_xids));
- mysql_cond_signal(&COND_prep_xids);
+
+ mysql_mutex_lock(&LOCK_xid_list);
+ current= current_binlog_id;
+ I_List_iterator<xid_count_per_binlog> it(binlog_xid_count_list);
+ first= true;
+ while ((b= it++))
+ {
+ if (b->binlog_id == binlog_id)
+ {
+ --b->xid_count;
+ break;
+ }
+ first= false;
+ }
+ /* Binlog is always found, as we do not remove until count reaches 0 */
+ DBUG_ASSERT(b);
+ /*
+ If a RESET MASTER is pending, we are about to remove all log files, and
+ the RESET MASTER thread is waiting for all pending unlog() calls to
+ complete while holding LOCK_log. In this case we should not log a binlog
+ checkpoint event (it would be deleted immediately anyway and we would
+ deadlock on LOCK_log) but just signal the thread.
+ */
+ if (unlikely(reset_master_pending))
+ {
+ mysql_cond_signal(&COND_xid_list);
+ mysql_mutex_unlock(&LOCK_xid_list);
+ DBUG_VOID_RETURN;
}
+
+ if (likely(binlog_id == current) || b->xid_count != 0 || !first ||
+ !write_checkpoint)
+ {
+ /* No new binlog checkpoint reached yet. */
+ mysql_mutex_unlock(&LOCK_xid_list);
+ DBUG_VOID_RETURN;
+ }
+
+ /*
+ Now log a binlog checkpoint for the first binlog file with a non-zero count.
+
+ Note that it is possible (though perhaps unlikely) that when the count of
+ binlog (N-2) drops to zero, binlog (N-1) is already at zero. So we may
+ need to skip several entries before we find the one to log in the binlog
+ checkpoint event.
+
+ We chain the locking of LOCK_xid_list and LOCK_log, so that we ensure that
+ Binlog_checkpoint_events are logged in order. This simplifies recovery a
+ bit, as it can just take the last binlog checkpoint in the log, rather
+ than compare all found against each other to find the one pointing to the
+ most recent binlog.
+
+ Note also that we need to first release LOCK_xid_list, then acquire
+ LOCK_log, then re-acquire LOCK_xid_list. If we were to take LOCK_log while
+ holding LOCK_xid_list, we might deadlock with other threads that take the
+ locks in the opposite order.
+ */
+
+ mysql_mutex_unlock(&LOCK_xid_list);
+ mysql_mutex_lock(&LOCK_log);
+ mysql_mutex_lock(&LOCK_xid_list);
+ /* We need to reload current_binlog_id due to release/re-take of lock. */
+ current= current_binlog_id;
+
+ for (;;)
+ {
+ /* Remove initial element(s) with zero count. */
+ b= binlog_xid_count_list.head();
+ /*
+ We must not remove all elements in the list - the entry for the current
+ binlog must always be present.
+ */
+ DBUG_ASSERT(b);
+ if (b->binlog_id == current || b->xid_count > 0)
+ break;
+ my_free(binlog_xid_count_list.get());
+ }
+
+ mysql_mutex_unlock(&LOCK_xid_list);
+ write_binlog_checkpoint_event_already_locked(b->binlog_name,
+ b->binlog_name_len);
+ mysql_mutex_unlock(&LOCK_log);
DBUG_VOID_RETURN;
}
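The unlock/lock/re-lock sequence above follows the usual fixed lock-order
discipline; a minimal sketch of the same pattern with standard mutexes
(assumed order: LOCK_log before LOCK_xid_list):

  #include <mutex>

  std::mutex LOCK_log, LOCK_xid_list;      // stand-ins for the server mutexes

  void checkpoint_path()
  {
    std::unique_lock<std::mutex> xl(LOCK_xid_list);
    // ... decide that a binlog checkpoint must be written ...
    xl.unlock();                           // drop the inner lock first
    std::lock_guard<std::mutex> lg(LOCK_log);  // then take the outer lock
    xl.lock();                             // re-take the inner lock; state
                                           // read earlier must be re-checked,
                                           // as the code above does
    // ... write the checkpoint event while both locks are held ...
  }

Taking LOCK_log while still holding LOCK_xid_list would invert the order
against threads that lock LOCK_log first, which is exactly the deadlock the
comment describes.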
int TC_LOG_BINLOG::unlog(ulong cookie, my_xid xid)
{
DBUG_ENTER("TC_LOG_BINLOG::unlog");
- if (xid)
- mark_xid_done();
- /* As ::write_transaction_to_binlog() did not rotate, do it here. */
- DBUG_RETURN(rotate_and_purge(0));
+ if (!xid)
+ DBUG_RETURN(0);
+
+ if (!BINLOG_COOKIE_IS_DUMMY(cookie))
+ mark_xid_done(BINLOG_COOKIE_GET_ID(cookie), true);
+ /*
+ See comment in trx_group_commit_leader() - if rotate() gave a failure,
+ we delay the return of error code to here.
+ */
+ DBUG_RETURN(BINLOG_COOKIE_GET_ERROR_FLAG(cookie));
}
-int TC_LOG_BINLOG::recover(IO_CACHE *log, Format_description_log_event *fdle)
+void
+TC_LOG_BINLOG::commit_checkpoint_notify(void *cookie)
+{
+ xid_count_per_binlog *entry= static_cast<xid_count_per_binlog *>(cookie);
+ mysql_mutex_lock(&LOCK_binlog_background_thread);
+ entry->next_in_queue= binlog_background_thread_queue;
+ binlog_background_thread_queue= entry;
+ mysql_cond_signal(&COND_binlog_background_thread);
+ mysql_mutex_unlock(&LOCK_binlog_background_thread);
+}
+
+/*
+ Binlog background thread.
+
+ This thread is used to log binlog checkpoints in the background, rather than
+ in the context of random storage engine threads that happen to call
+ commit_checkpoint_notify_ha() and may not like the delays while syncing
+ binlog to disk, or may not be set up with my_thread_init() and the other
+ state a server thread needs.
+
+ In the future, this thread could also be used to do log rotation in the
+ background, which could eliminate all stalls around binlog rotations.
+*/
+pthread_handler_t
+binlog_background_thread(void *arg __attribute__((unused)))
+{
+ bool stop;
+ MYSQL_BIN_LOG::xid_count_per_binlog *queue, *next;
+ THD *thd;
+ my_thread_init();
+ DBUG_ENTER("binlog_background_thread");
+
+ thd= new THD;
+ thd->system_thread= SYSTEM_THREAD_BINLOG_BACKGROUND;
+ thd->thread_stack= (char*) &thd; /* Set approximate stack start */
+ mysql_mutex_lock(&LOCK_thread_count);
+ thd->thread_id= thread_id++;
+ mysql_mutex_unlock(&LOCK_thread_count);
+ thd->store_globals();
+
+ for (;;)
+ {
+ /*
+ Wait until there is something in the queue to process, or we are asked
+ to shut down.
+ */
+ thd_proc_info(thd, "Waiting for background binlog tasks");
+ mysql_mutex_lock(&mysql_bin_log.LOCK_binlog_background_thread);
+ for (;;)
+ {
+ stop= binlog_background_thread_stop;
+ queue= binlog_background_thread_queue;
+ if (stop || queue)
+ break;
+ mysql_cond_wait(&mysql_bin_log.COND_binlog_background_thread,
+ &mysql_bin_log.LOCK_binlog_background_thread);
+ }
+ /* Grab the queue, if any. */
+ binlog_background_thread_queue= NULL;
+ mysql_mutex_unlock(&mysql_bin_log.LOCK_binlog_background_thread);
+
+ /* Process any incoming commit_checkpoint_notify() calls. */
+ while (queue)
+ {
+ thd_proc_info(thd, "Processing binlog checkpoint notification");
+ /* Grab next pointer first, as mark_xid_done() may free the element. */
+ next= queue->next_in_queue;
+ mysql_bin_log.mark_xid_done(queue->binlog_id, true);
+ queue= next;
+
+ DBUG_EXECUTE_IF("binlog_background_checkpoint_processed",
+ DBUG_ASSERT(!debug_sync_set_action(
+ thd,
+ STRING_WITH_LEN("now SIGNAL binlog_background_checkpoint_processed")));
+ );
+ }
+
+ if (stop)
+ break;
+ }
+
+ thd_proc_info(thd, "Stopping binlog background thread");
+
+ mysql_mutex_lock(&LOCK_thread_count);
+ delete thd;
+ mysql_mutex_unlock(&LOCK_thread_count);
+
+ my_thread_end();
+
+ /* Signal that we are (almost) stopped. */
+ mysql_mutex_lock(&mysql_bin_log.LOCK_binlog_background_thread);
+ binlog_background_thread_stop= false;
+ mysql_cond_signal(&mysql_bin_log.COND_binlog_background_thread_end);
+ mysql_mutex_unlock(&mysql_bin_log.LOCK_binlog_background_thread);
+
+ DBUG_RETURN(0);
+}
+
+#ifdef HAVE_PSI_INTERFACE
+static PSI_thread_key key_thread_binlog;
+
+static PSI_thread_info all_binlog_threads[]=
+{
+ { &key_thread_binlog, "binlog_background", PSI_FLAG_GLOBAL},
+};
+#endif /* HAVE_PSI_INTERFACE */
+
+static bool
+start_binlog_background_thread()
+{
+ pthread_t th;
+
+#ifdef HAVE_PSI_INTERFACE
+ if (PSI_server)
+ PSI_server->register_thread("sql", all_binlog_threads,
+ array_elements(all_binlog_threads));
+#endif
+
+ if (mysql_thread_create(key_thread_binlog, &th, NULL,
+ binlog_background_thread, NULL))
+ return 1;
+
+ binlog_background_thread_started= true;
+ return 0;
+}
+
+
+int TC_LOG_BINLOG::recover(LOG_INFO *linfo, const char *last_log_name,
+ IO_CACHE *first_log,
+ Format_description_log_event *fdle)
{
Log_event *ev;
HASH xids;
MEM_ROOT mem_root;
+ char binlog_checkpoint_name[FN_REFLEN];
+ bool binlog_checkpoint_found;
+ bool first_round;
+ IO_CACHE log;
+ File file= -1;
+ const char *errmsg;
if (! fdle->is_valid() ||
my_hash_init(&xids, &my_charset_bin, TC_LOG_PAGE_SIZE/3, 0,
sizeof(my_xid), 0, 0, MYF(0)))
goto err1;
- init_alloc_root(&mem_root, TC_LOG_PAGE_SIZE, TC_LOG_PAGE_SIZE);
+ init_alloc_root(&mem_root, TC_LOG_PAGE_SIZE, TC_LOG_PAGE_SIZE, MYF(0));
fdle->flags&= ~LOG_EVENT_BINLOG_IN_USE_F; // abort on the first error
- while ((ev= Log_event::read_log_event(log, 0, fdle,
- opt_master_verify_checksum))
- && ev->is_valid())
+ /*
+ Scan the binlog for XIDs that need to be committed if still in the
+ prepared stage.
+
+ Start with the latest binlog file, then continue with any other binlog
+ files if the last found binlog checkpoint indicates it is needed.
+ */
+
+ binlog_checkpoint_found= false;
+ first_round= true;
+ for (;;)
{
- if (ev->get_type_code() == XID_EVENT)
+ while ((ev= Log_event::read_log_event(first_round ? first_log : &log,
+ 0, fdle, opt_master_verify_checksum))
+ && ev->is_valid())
{
- Xid_log_event *xev=(Xid_log_event *)ev;
- uchar *x= (uchar *) memdup_root(&mem_root, (uchar*) &xev->xid,
- sizeof(xev->xid));
- if (!x || my_hash_insert(&xids, x))
+ switch (ev->get_type_code())
+ {
+ case XID_EVENT:
+ {
+ Xid_log_event *xev=(Xid_log_event *)ev;
+ uchar *x= (uchar *) memdup_root(&mem_root, (uchar*) &xev->xid,
+ sizeof(xev->xid));
+ if (!x || my_hash_insert(&xids, x))
+ {
+ delete ev;
+ goto err2;
+ }
+ break;
+ }
+ case BINLOG_CHECKPOINT_EVENT:
+ if (first_round)
+ {
+ uint dir_len;
+ Binlog_checkpoint_log_event *cev= (Binlog_checkpoint_log_event *)ev;
+ if (cev->binlog_file_len >= FN_REFLEN)
+ sql_print_warning("Incorrect binlog checkpoint event with too "
+ "long file name found.");
+ else
+ {
+ /*
+ Note that we cannot use make_log_name() here, as we have not yet
+ initialised MYSQL_BIN_LOG::log_file_name.
+ */
+ dir_len= dirname_length(last_log_name);
+ strmake(strnmov(binlog_checkpoint_name, last_log_name, dir_len),
+ cev->binlog_file_name, FN_REFLEN - 1 - dir_len);
+ binlog_checkpoint_found= true;
+ }
+ break;
+ }
+ default:
+ /* Nothing. */
+ break;
+ }
+ delete ev;
+ }
+
+ /*
+ If the last binlog checkpoint event points to an older log, we have to
+ scan all logs from there also, to get all possible XIDs to recover.
+
+ If there was no binlog checkpoint event at all, this means the log was
+ written by an older version of MariaDB (or MySQL) - these always have an
+ (implicit) binlog checkpoint event at the start of the last binlog file.
+ */
+ if (first_round)
+ {
+ if (!binlog_checkpoint_found)
+ break;
+ first_round= false;
+ DBUG_EXECUTE_IF("xa_recover_expect_master_bin_000004",
+ if (0 != strcmp("./master-bin.000004", binlog_checkpoint_name) &&
+ 0 != strcmp(".\\master-bin.000004", binlog_checkpoint_name))
+ DBUG_SUICIDE();
+ );
+ if (find_log_pos(linfo, binlog_checkpoint_name, 1))
+ {
+ sql_print_error("Binlog file '%s' not found in binlog index, needed "
+ "for recovery. Aborting.", binlog_checkpoint_name);
goto err2;
+ }
+ }
+ else
+ {
+ end_io_cache(&log);
+ mysql_file_close(file, MYF(MY_WME));
+ file= -1;
+ }
+
+ if (0 == strcmp(linfo->log_file_name, last_log_name))
+ break; // No more files to do
+ if ((file= open_binlog(&log, linfo->log_file_name, &errmsg)) < 0)
+ {
+ sql_print_error("%s", errmsg);
+ goto err2;
+ }
+ /*
+ We do not need to read the Format_description_log_event of other binlog
+ files. It is not possible for a binlog checkpoint to span multiple
+ binlog files written by different versions of the server. So we can use
+ the first one read for reading from all binlog files.
+ */
+ if (find_next_log(linfo, 1))
+ {
+ sql_print_error("Error reading binlog files during recovery. Aborting.");
+ goto err2;
}
- delete ev;
}
if (ha_recover(&xids))
@@ -7569,6 +8432,11 @@ int TC_LOG_BINLOG::recover(IO_CACHE *log, Format_description_log_event *fdle)
return 0;
err2:
+ if (file >= 0)
+ {
+ end_io_cache(&log);
+ mysql_file_close(file, MYF(MY_WME));
+ }
free_root(&mem_root, MYF(0));
my_hash_free(&xids);
err1:
@@ -7634,10 +8502,13 @@ binlog_checksum_update(MYSQL_THD thd, struct st_mysql_sys_var *var,
{
ulong value= *((ulong *)save);
bool check_purge= false;
+ ulong prev_binlog_id;
+ LINT_INIT(prev_binlog_id);
mysql_mutex_lock(mysql_bin_log.get_log_lock());
if(mysql_bin_log.is_open())
{
+ prev_binlog_id= mysql_bin_log.current_binlog_id;
if (binlog_checksum_options != value)
mysql_bin_log.checksum_alg_reset= (uint8) value;
if (mysql_bin_log.rotate(true, &check_purge))
@@ -7651,7 +8522,7 @@ binlog_checksum_update(MYSQL_THD thd, struct st_mysql_sys_var *var,
mysql_bin_log.checksum_alg_reset= BINLOG_CHECKSUM_ALG_UNDEF;
mysql_mutex_unlock(mysql_bin_log.get_log_lock());
if (check_purge)
- mysql_bin_log.purge();
+ mysql_bin_log.checkpoint_and_purge(prev_binlog_id);
}
diff --git a/sql/log.h b/sql/log.h
index e388df61b38..80fe34b5ff2 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -49,6 +49,7 @@ class TC_LOG
bool need_prepare_ordered,
bool need_commit_ordered) = 0;
virtual int unlog(ulong cookie, my_xid xid)=0;
+ virtual void commit_checkpoint_notify(void *cookie)= 0;
protected:
/*
@@ -98,8 +99,12 @@ public:
return 1;
}
int unlog(ulong cookie, my_xid xid) { return 0; }
+ void commit_checkpoint_notify(void *cookie) { DBUG_ASSERT(0); };
};
+#define TC_LOG_PAGE_SIZE 8192
+#define TC_LOG_MIN_SIZE (3*TC_LOG_PAGE_SIZE)
+
#ifdef HAVE_MMAP
class TC_LOG_MMAP: public TC_LOG
{
@@ -110,6 +115,12 @@ class TC_LOG_MMAP: public TC_LOG
PS_DIRTY // new xids added since last sync
} PAGE_STATE;
+ struct pending_cookies {
+ uint count;
+ uint pending_count;
+ ulong cookies[TC_LOG_PAGE_SIZE/sizeof(my_xid)];
+ };
+
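For scale, one pending_cookies batch holds TC_LOG_PAGE_SIZE/sizeof(my_xid)
entries; a standalone check, assuming the usual 8-byte my_xid (ulonglong):

  static_assert(8192 / sizeof(unsigned long long) == 1024,
                "one pending_cookies batch holds 1024 cookie slots");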
private:
typedef struct st_page {
struct st_page *next; // pages are linked in a FIFO queue
@@ -141,7 +152,7 @@ class TC_LOG_MMAP: public TC_LOG
one has to use active->lock.
Same for LOCK_pool and LOCK_sync
*/
- mysql_mutex_t LOCK_active, LOCK_pool, LOCK_sync;
+ mysql_mutex_t LOCK_active, LOCK_pool, LOCK_sync, LOCK_pending_checkpoint;
mysql_cond_t COND_pool, COND_active;
/*
Queue of threads that need to call commit_ordered().
@@ -163,14 +174,16 @@ class TC_LOG_MMAP: public TC_LOG
*/
mysql_cond_t COND_queue_busy;
my_bool commit_ordered_queue_busy;
+ pending_cookies* pending_checkpoint;
public:
- TC_LOG_MMAP(): inited(0) {}
+ TC_LOG_MMAP(): inited(0), pending_checkpoint(0) {}
int open(const char *opt_name);
void close();
int log_and_order(THD *thd, my_xid xid, bool all,
bool need_prepare_ordered, bool need_commit_ordered);
int unlog(ulong cookie, my_xid xid);
+ void commit_checkpoint_notify(void *cookie);
int recover();
private:
@@ -178,6 +191,7 @@ class TC_LOG_MMAP: public TC_LOG
void get_active_from_pool();
int sync();
int overflow();
+ int delete_entry(ulong cookie);
};
#else
#define TC_LOG_MMAP TC_LOG_DUMMY
@@ -354,6 +368,33 @@ private:
time_t last_time;
};
+/*
+ We assign each binlog file an internal ID, used to identify them for unlog().
+ The IDs start from 0 and increment for each new binlog created.
+
+ In unlog() we need to know the ID of the binlog file that the corresponding
+ transaction was written into. We also need a special value for a corner
+ case where there is no corresponding binlog id (since nothing was logged).
+ And we need an error flag to mark that unlog() must return failure.
+
+ We use the following macros to pack all of this information into the single
+ ulong available with log_and_order() / unlog().
+
+ Note that we cannot use the value 0 for cookie, as that is reserved as error
+ return value from log_and_order().
+ */
+#define BINLOG_COOKIE_ERROR_RETURN 0
+#define BINLOG_COOKIE_DUMMY_ID 1
+#define BINLOG_COOKIE_BASE 2
+#define BINLOG_COOKIE_DUMMY(error_flag) \
+ ( (BINLOG_COOKIE_DUMMY_ID<<1) | ((error_flag)&1) )
+#define BINLOG_COOKIE_MAKE(id, error_flag) \
+ ( (((id)+BINLOG_COOKIE_BASE)<<1) | ((error_flag)&1) )
+#define BINLOG_COOKIE_GET_ERROR_FLAG(c) ((c) & 1)
+#define BINLOG_COOKIE_GET_ID(c) ( ((ulong)(c)>>1) - BINLOG_COOKIE_BASE )
+#define BINLOG_COOKIE_IS_DUMMY(c) \
+ ( ((ulong)(c)>>1) == BINLOG_COOKIE_DUMMY_ID )
+
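The packing round-trips can be checked in isolation; a minimal sketch
reproducing the macros above as plain functions (illustration only):

  #include <cassert>

  typedef unsigned long ulong;

  static const ulong DUMMY_ID= 1, BASE= 2;   // mirror the #defines above

  static ulong cookie_make(ulong id, ulong err)
  { return ((id + BASE) << 1) | (err & 1); }
  static ulong cookie_id(ulong c)       { return (c >> 1) - BASE; }
  static bool  cookie_error(ulong c)    { return c & 1; }
  static bool  cookie_is_dummy(ulong c) { return (c >> 1) == DUMMY_ID; }

  int main()
  {
    ulong c= cookie_make(5, 0);              // binlog id 5, no delayed error
    assert(c != 0);                          // 0 stays reserved for errors
    assert(cookie_id(c) == 5 && !cookie_error(c) && !cookie_is_dummy(c));
    ulong d= (DUMMY_ID << 1) | 1;            // BINLOG_COOKIE_DUMMY(1)
    assert(cookie_is_dummy(d) && cookie_error(d));
    return 0;
  }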
class binlog_cache_mngr;
class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
{
@@ -392,12 +433,35 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
IO_CACHE *error_cache;
/* This is the `all' parameter for ha_commit_ordered(). */
bool all;
+ /*
+ True if we need to increment xid_count in trx_group_commit_leader() and
+ decrement in unlog() (this is needed if there is a participating engine
+ that does not implement the commit_checkpoint_request() handlerton
+ method).
+ */
+ bool need_unlog;
+ /*
+ Fields used to pass the necessary information to the last thread in a
+ group commit, only used when opt_optimize_thread_scheduling is not set.
+ */
+ bool check_purge;
+ ulong binlog_id;
};
+ /*
+ When this is set, a RESET MASTER is in progress.
+
+ Then we should not write any binlog checkpoints into the binlog (that
+ could result in deadlock on LOCK_log, and we will delete all binlog files
+ anyway). Instead we should signal COND_xid_list whenever a new binlog
+ checkpoint arrives - when all have arrived, RESET MASTER will complete.
+ */
+ bool reset_master_pending;
+
/* LOCK_log and LOCK_index are inited by init_pthread_objects() */
mysql_mutex_t LOCK_index;
- mysql_mutex_t LOCK_prep_xids;
- mysql_cond_t COND_prep_xids;
+ mysql_mutex_t LOCK_xid_list;
+ mysql_cond_t COND_xid_list;
mysql_cond_t update_cond;
ulonglong bytes_written;
IO_CACHE index_file;
@@ -414,27 +478,14 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
The max size before rotation (usable only if log_type == LOG_BIN: binary
logs and relay logs).
For a binlog, max_size should be max_binlog_size.
- For a relay log, it should be max_relay_log_size if this is non-zero,
- max_binlog_size otherwise.
max_size is set in init(), and dynamically changed (when one does SET
- GLOBAL MAX_BINLOG_SIZE|MAX_RELAY_LOG_SIZE) by fix_max_binlog_size and
- fix_max_relay_log_size).
+ GLOBAL MAX_BINLOG_SIZE|MAX_RELAY_LOG_SIZE) from sys_vars.cc
*/
ulong max_size;
- long prepared_xids; /* for tc log - number of xids to remember */
// current file sequence number for load data infile binary logging
uint file_id;
uint open_count; // For replication
int readers_count;
- bool need_start_event;
- /*
- no_auto_events means we don't want any of these automatic events :
- Start/Rotate/Stop. That is, in 4.x when we rotate a relay log, we don't
- want a Rotate_log event to be written to the relay log. When we start a
- relay log etc. So in 4.x this is 1 for relay logs, 0 for binlogs.
- In 5.0 it's 0 for relay logs too!
- */
- bool no_auto_events;
/* Queue of transactions queued up to participate in group commit. */
group_commit_entry *group_commit_queue;
/*
@@ -470,13 +521,40 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
*/
int new_file_without_locking();
int new_file_impl(bool need_lock);
+ void do_checkpoint_request(ulong binlog_id);
+ void purge();
int write_transaction_or_stmt(group_commit_entry *entry);
bool write_transaction_to_binlog_events(group_commit_entry *entry);
void trx_group_commit_leader(group_commit_entry *leader);
- void mark_xid_done();
- void mark_xids_active(uint xid_count);
public:
+ /*
+ A list of struct xid_count_per_binlog is used to keep track of how many
+ XIDs are in prepared, but not committed, state in each binlog, and how
+ many commit_checkpoint_request()'s are pending.
+
+ When count drops to zero in a binlog after rotation, it means that there
+ are no more XIDs in prepared state, so that binlog is no longer needed
+ for XA crash recovery, and we can log a new binlog checkpoint event.
+
+ The list is protected against simultaneous access from multiple
+ threads by LOCK_xid_list.
+ */
+ struct xid_count_per_binlog : public ilink {
+ char *binlog_name;
+ uint binlog_name_len;
+ ulong binlog_id;
+ /* Total prepared XIDs and pending checkpoint requests in this binlog. */
+ long xid_count;
+ /* For linking in requests to the binlog background thread. */
+ xid_count_per_binlog *next_in_queue;
+ xid_count_per_binlog(); /* Give link error if constructor used. */
+ };
+ I_List<xid_count_per_binlog> binlog_xid_count_list;
+ mysql_mutex_t LOCK_binlog_background_thread;
+ mysql_cond_t COND_binlog_background_thread;
+ mysql_cond_t COND_binlog_background_thread_end;
+
using MYSQL_LOG::generate_name;
using MYSQL_LOG::is_open;
@@ -534,6 +612,7 @@ public:
*/
char last_commit_pos_file[FN_REFLEN];
my_off_t last_commit_pos_offset;
+ ulong current_binlog_id;
MYSQL_BIN_LOG(uint *sync_period);
/*
@@ -562,7 +641,9 @@ public:
int log_and_order(THD *thd, my_xid xid, bool all,
bool need_prepare_ordered, bool need_commit_ordered);
int unlog(ulong cookie, my_xid xid);
- int recover(IO_CACHE *log, Format_description_log_event *fdle);
+ void commit_checkpoint_notify(void *cookie);
+ int recover(LOG_INFO *linfo, const char *last_log_name, IO_CACHE *first_log,
+ Format_description_log_event *fdle);
#if !defined(MYSQL_CLIENT)
int flush_and_set_pending_rows_event(THD *thd, Rows_log_event* event,
@@ -590,15 +671,14 @@ public:
void signal_update();
void wait_for_update_relay_log(THD* thd);
int wait_for_update_bin_log(THD* thd, const struct timespec * timeout);
- void set_need_start_event() { need_start_event = 1; }
- void init(bool no_auto_events_arg, ulong max_size);
+ void init(ulong max_size);
void init_pthread_objects();
void cleanup();
bool open(const char *log_name,
enum_log_type log_type,
const char *new_name,
enum cache_type io_cache_type_arg,
- bool no_auto_events_arg, ulong max_size,
+ ulong max_size,
bool null_created,
bool need_mutex);
bool open_index_file(const char *index_file_name_arg,
@@ -614,6 +694,7 @@ public:
bool write_incident_already_locked(THD *thd);
bool write_incident(THD *thd);
+ void write_binlog_checkpoint_event_already_locked(const char *name, uint len);
int write_cache(THD *thd, IO_CACHE *cache);
void set_write_error(THD *thd, bool is_transactional);
bool check_write_error(THD *thd);
@@ -629,11 +710,14 @@ public:
bool appendv(const char* buf,uint len,...);
bool append(Log_event* ev);
+ void mark_xids_active(ulong cookie, uint xid_count);
+ void mark_xid_done(ulong cookie, bool write_checkpoint);
void make_log_name(char* buf, const char* log_ident);
bool is_active(const char* log_file_name);
+ bool can_purge_log(const char *log_file_name);
int update_log_index(LOG_INFO* linfo, bool need_update_threads);
int rotate(bool force_rotate, bool* check_purge);
- void purge();
+ void checkpoint_and_purge(ulong binlog_id);
int rotate_and_purge(bool force_rotate);
/**
Flush binlog cache and synchronize to disk.
@@ -664,7 +748,7 @@ public:
int register_create_index_entry(const char* entry);
int purge_index_entry(THD *thd, ulonglong *decrease_log_space,
bool need_mutex);
- bool reset_logs(THD* thd);
+ bool reset_logs(THD* thd, bool create_new_log);
void close(uint exiting);
void clear_inuse_flag_when_closing(File file);
diff --git a/sql/log_event.cc b/sql/log_event.cc
index d5cfa9367c5..10f0fe1e931 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -748,6 +748,7 @@ const char* Log_event::get_type_str(Log_event_type type)
case EXECUTE_LOAD_QUERY_EVENT: return "Execute_load_query";
case INCIDENT_EVENT: return "Incident";
case ANNOTATE_ROWS_EVENT: return "Annotate_rows";
+ case BINLOG_CHECKPOINT_EVENT: return "Binlog_checkpoint";
default: return "Unknown"; /* impossible */
}
}
@@ -922,9 +923,9 @@ int Log_event::do_update_pos(Relay_log_info *rli)
Log_event::enum_skip_reason
Log_event::do_shall_skip(Relay_log_info *rli)
{
- DBUG_PRINT("info", ("ev->server_id=%lu, ::server_id=%lu,"
- " rli->replicate_same_server_id=%d,"
- " rli->slave_skip_counter=%d",
+ DBUG_PRINT("info", ("ev->server_id: %lu, ::server_id: %lu,"
+ " rli->replicate_same_server_id: %d,"
+ " rli->slave_skip_counter: %lu",
(ulong) server_id, (ulong) ::server_id,
rli->replicate_same_server_id,
rli->slave_skip_counter));
@@ -1407,7 +1408,7 @@ err:
DBUG_ASSERT(error != 0);
sql_print_error("Error in Log_event::read_log_event(): "
"'%s', data_len: %d, event_type: %d",
- error,data_len,head[EVENT_TYPE_OFFSET]);
+ error,data_len,(uchar)(head[EVENT_TYPE_OFFSET]));
my_free(buf);
/*
The SQL slave thread will check if file->error<0 to know
@@ -1556,6 +1557,9 @@ Log_event* Log_event::read_log_event(const char* buf, uint event_len,
case ROTATE_EVENT:
ev = new Rotate_log_event(buf, event_len, description_event);
break;
+ case BINLOG_CHECKPOINT_EVENT:
+ ev = new Binlog_checkpoint_log_event(buf, event_len, description_event);
+ break;
#ifdef HAVE_REPLICATION
case SLAVE_EVENT: /* can never happen (unused event) */
ev = new Slave_log_event(buf, event_len, description_event);
@@ -3320,6 +3324,115 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
}
+/*
+ Replace a binlog event read into a packet with a dummy event. Either a
+ Query_log_event that has just a comment, or if that will not fit in the
+ space used for the event to be replaced, then a NULL user_var event.
+
+ This is used when sending binlog data to a slave which does not understand
+ this particular event and which is too old to support informational events
+ or holes in the event stream.
+
+ This makes it possible to write such events into the binlog on the master
+ and still replicate against old slaves without breaking them.
+
+ Clears the flag LOG_EVENT_THREAD_SPECIFIC_F and sets LOG_EVENT_SUPPRESS_USE_F.
+ Overwrites the type with QUERY_EVENT (or USER_VAR_EVENT), and replaces the
+ body with a minimal query / NULL user var.
+
+ Returns zero on success, -1 on error due to too little space in the
+ original event. A minimum of 25 bytes (19 bytes fixed header + 6 bytes in
+ the body) is needed for an event to be replaced with a dummy event.
+*/
+int
+Query_log_event::dummy_event(String *packet, ulong ev_offset,
+ uint8 checksum_alg)
+{
+ uchar *p= (uchar *)packet->ptr() + ev_offset;
+ size_t data_len= packet->length() - ev_offset;
+ uint16 flags;
+ static const size_t min_user_var_event_len=
+ LOG_EVENT_HEADER_LEN + UV_NAME_LEN_SIZE + 1 + UV_VAL_IS_NULL; // 25
+ static const size_t min_query_event_len=
+ LOG_EVENT_HEADER_LEN + QUERY_HEADER_LEN + 1 + 1; // 34
+
+ if (checksum_alg == BINLOG_CHECKSUM_ALG_CRC32)
+ data_len-= BINLOG_CHECKSUM_LEN;
+ else
+ DBUG_ASSERT(checksum_alg == BINLOG_CHECKSUM_ALG_UNDEF ||
+ checksum_alg == BINLOG_CHECKSUM_ALG_OFF);
+
+ if (data_len < min_user_var_event_len)
+ /* Cannot replace with dummy, event too short. */
+ return -1;
+
+ flags= uint2korr(p + FLAGS_OFFSET);
+ flags&= ~LOG_EVENT_THREAD_SPECIFIC_F;
+ flags|= LOG_EVENT_SUPPRESS_USE_F;
+ int2store(p + FLAGS_OFFSET, flags);
+
+ if (data_len < min_query_event_len)
+ {
+ /*
+ Have to use dummy user_var event for such a short packet.
+
+ This works, but the event will be considered part of an event group with
+ the following event. So for example @@global.sql_slave_skip_counter=1
+ will skip not only the dummy event, but also the immediately following
+ event.
+
+ We write a NULL user var with the name @`!dummyvar` (or as much
+ of that as will fit within the size of the original event - so
+ possibly just @`!`).
+ */
+ static const char var_name[]= "!dummyvar";
+ uint name_len= data_len - (min_user_var_event_len - 1);
+
+ p[EVENT_TYPE_OFFSET]= USER_VAR_EVENT;
+ int4store(p + LOG_EVENT_HEADER_LEN, name_len);
+ memcpy(p + LOG_EVENT_HEADER_LEN + UV_NAME_LEN_SIZE, var_name, name_len);
+ p[LOG_EVENT_HEADER_LEN + UV_NAME_LEN_SIZE + name_len]= 1; // indicates NULL
+ }
+ else
+ {
+ /*
+ Use a dummy query event, just a comment.
+ */
+ static const char message[]=
+ "# Dummy event replacing event type %u that slave cannot handle.";
+ char buf[sizeof(message)+1]; /* +1, as %u can expand to 3 digits. */
+ uchar old_type= p[EVENT_TYPE_OFFSET];
+ uchar *q= p + LOG_EVENT_HEADER_LEN;
+ size_t comment_len, len;
+
+ p[EVENT_TYPE_OFFSET]= QUERY_EVENT;
+ int4store(q + Q_THREAD_ID_OFFSET, 0);
+ int4store(q + Q_EXEC_TIME_OFFSET, 0);
+ q[Q_DB_LEN_OFFSET]= 0;
+ int2store(q + Q_ERR_CODE_OFFSET, 0);
+ int2store(q + Q_STATUS_VARS_LEN_OFFSET, 0);
+ q[Q_DATA_OFFSET]= 0; /* Zero terminator for empty db */
+ q+= Q_DATA_OFFSET + 1;
+ len= my_snprintf(buf, sizeof(buf), message, old_type);
+ comment_len= data_len - (min_query_event_len - 1);
+ if (comment_len <= len)
+ memcpy(q, buf, comment_len);
+ else
+ {
+ memcpy(q, buf, len);
+ memset(q+len, ' ', comment_len - len);
+ }
+ }
+
+ if (checksum_alg == BINLOG_CHECKSUM_ALG_CRC32)
+ {
+ ha_checksum crc= my_checksum(0L, p, data_len);
+ int4store(p + data_len, crc);
+ }
+ return 0;
+}
+
+
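The two size floors and the resulting choice of replacement can be worked
through numerically; a sketch using the constants given in the comments above
(LOG_EVENT_HEADER_LEN 19, UV_NAME_LEN_SIZE 4, UV_VAL_IS_NULL 1,
QUERY_HEADER_LEN 13):

  #include <cstddef>
  #include <cstdio>

  static const size_t MIN_USER_VAR= 19 + 4 + 1 + 1;   // 25
  static const size_t MIN_QUERY=    19 + 13 + 1 + 1;  // 34

  static const char *dummy_kind(size_t data_len)
  {
    if (data_len < MIN_USER_VAR) return "too short, returns -1";
    if (data_len < MIN_QUERY)    return "NULL user_var event";
    return "comment-only Query event";
  }

  int main()
  {
    printf("24 bytes -> %s\n", dummy_kind(24));
    printf("30 bytes -> %s\n", dummy_kind(30));
    printf("60 bytes -> %s\n", dummy_kind(60));
    return 0;
  }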
#ifdef MYSQL_CLIENT
/**
Query_log_event::print().
@@ -4339,6 +4452,8 @@ Format_description_log_event(uint8 binlog_ver, const char* server_ver)
// Set header lengths of Maria events
post_header_len[ANNOTATE_ROWS_EVENT-1]= ANNOTATE_ROWS_HEADER_LEN;
+ post_header_len[BINLOG_CHECKPOINT_EVENT-1]=
+ BINLOG_CHECKPOINT_HEADER_LEN;
// Sanity-check that all post header lengths are initialized.
int i;
@@ -5798,6 +5913,86 @@ Rotate_log_event::do_shall_skip(Relay_log_info *rli)
/**************************************************************************
+ Binlog_checkpoint_log_event methods
+**************************************************************************/
+
+#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
+void Binlog_checkpoint_log_event::pack_info(THD *thd, Protocol *protocol)
+{
+ protocol->store(binlog_file_name, binlog_file_len, &my_charset_bin);
+}
+#endif
+
+
+#ifdef MYSQL_CLIENT
+void Binlog_checkpoint_log_event::print(FILE *file,
+ PRINT_EVENT_INFO *print_event_info)
+{
+ Write_on_release_cache cache(&print_event_info->head_cache, file,
+ Write_on_release_cache::FLUSH_F);
+
+ if (print_event_info->short_form)
+ return;
+ print_header(&cache, print_event_info, FALSE);
+ my_b_printf(&cache, "\tBinlog checkpoint ");
+ my_b_write(&cache, (uchar*)binlog_file_name, binlog_file_len);
+ my_b_printf(&cache, "\n");
+}
+#endif /* MYSQL_CLIENT */
+
+
+#ifdef MYSQL_SERVER
+Binlog_checkpoint_log_event::Binlog_checkpoint_log_event(
+ const char *binlog_file_name_arg,
+ uint binlog_file_len_arg)
+ :Log_event(),
+ binlog_file_name(my_strndup(binlog_file_name_arg, binlog_file_len_arg,
+ MYF(MY_WME))),
+ binlog_file_len(binlog_file_len_arg)
+{
+ cache_type= EVENT_NO_CACHE;
+}
+#endif /* MYSQL_SERVER */
+
+
+Binlog_checkpoint_log_event::Binlog_checkpoint_log_event(
+ const char *buf, uint event_len,
+ const Format_description_log_event *description_event)
+ :Log_event(buf, description_event), binlog_file_name(0)
+{
+ uint8 header_size= description_event->common_header_len;
+ uint8 post_header_len=
+ description_event->post_header_len[BINLOG_CHECKPOINT_EVENT-1];
+ if (event_len < header_size + post_header_len ||
+ post_header_len < BINLOG_CHECKPOINT_HEADER_LEN)
+ return;
+ buf+= header_size;
+ /* See uint4korr and int4store below */
+ compile_time_assert(BINLOG_CHECKPOINT_HEADER_LEN == 4);
+ binlog_file_len= uint4korr(buf);
+ if (event_len - (header_size + post_header_len) < binlog_file_len)
+ return;
+ binlog_file_name= my_strndup(buf + post_header_len, binlog_file_len,
+ MYF(MY_WME));
+ return;
+}
+
+
+#ifndef MYSQL_CLIENT
+bool Binlog_checkpoint_log_event::write(IO_CACHE *file)
+{
+ uchar buf[BINLOG_CHECKPOINT_HEADER_LEN];
+ int4store(buf, binlog_file_len);
+ return write_header(file, BINLOG_CHECKPOINT_HEADER_LEN + binlog_file_len) ||
+ wrapper_my_b_safe_write(file, buf, BINLOG_CHECKPOINT_HEADER_LEN) ||
+ wrapper_my_b_safe_write(file, (const uchar *)binlog_file_name,
+ binlog_file_len) ||
+ write_footer(file);
+}
+#endif /* MYSQL_CLIENT */
+
+
+/**************************************************************************
Intvar_log_event methods
**************************************************************************/
@@ -9736,23 +9931,6 @@ Write_rows_log_event::do_before_row_operations(const Slave_reporting_capability
*/
}
- /*
- We need TIMESTAMP_NO_AUTO_SET otherwise ha_write_row() will not use fill
- any TIMESTAMP column with data from the row but instead will use
- the event's current time.
- As we replicate from TIMESTAMP to TIMESTAMP and slave has no extra
- columns, we know that all TIMESTAMP columns on slave will receive explicit
- data from the row, so TIMESTAMP_NO_AUTO_SET is ok.
- When we allow a table without TIMESTAMP to be replicated to a table having
- more columns including a TIMESTAMP column, or when we allow a TIMESTAMP
- column to be replicated into a BIGINT column and the slave's table has a
- TIMESTAMP column, then the slave's TIMESTAMP column will take its value
- from set_time() which we called earlier (consistent with SBR). And then in
- some cases we won't want TIMESTAMP_NO_AUTO_SET (will require some code to
- analyze if explicit data is provided for slave's TIMESTAMP columns).
- */
- m_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
-
/* Honor next number column if present */
m_table->next_number_field= m_table->found_next_number_field;
/*
@@ -10835,8 +11013,6 @@ Update_rows_log_event::do_before_row_operations(const Slave_reporting_capability
if ((err= find_key()))
return err;
- m_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
-
return 0;
}
@@ -11098,6 +11274,9 @@ Heartbeat_log_event::Heartbeat_log_event(const char* buf, uint event_len,
There is a dummy replacement for this in the embedded library that returns
FALSE; this is used by XtraDB to allow it to access replication stuff while
still being able to use the same plugin in both stand-alone and embedded.
+
+ In this function it's ok to use active_mi, as this is only called for
+ the main replication server.
*/
bool rpl_get_position_info(const char **log_file_name, ulonglong *log_pos,
const char **group_relay_log_name,
diff --git a/sql/log_event.h b/sql/log_event.h
index 2c2e1dcd8b9..ff13cab9cd5 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -259,6 +259,7 @@ struct sql_ex_info
#define INCIDENT_HEADER_LEN 2
#define HEARTBEAT_HEADER_LEN 0
#define ANNOTATE_ROWS_HEADER_LEN 0
+#define BINLOG_CHECKPOINT_HEADER_LEN 4
/*
Max number of possible extra bytes in a replication event compared to a
@@ -573,6 +574,43 @@ enum enum_binlog_checksum_alg {
#define BINLOG_CHECKSUM_LEN CHECKSUM_CRC32_SIGNATURE_LEN
#define BINLOG_CHECKSUM_ALG_DESC_LEN 1 /* 1 byte checksum alg descriptor */
+/*
+ These are capability numbers for MariaDB slave servers.
+
+ Newer MariaDB slaves set this to inform the master about their capabilities.
+ This allows the master to decide which events it can send to the slave
+ without breaking replication on old slaves that maybe do not understand
+ all events from newer masters.
+
+ As new releases are backwards compatible, a given capability also implies
+ all capabilities with smaller numbers.
+
+ Older MariaDB slaves and other MySQL slave servers do not set this, so they
+ are recorded with capability 0.
+*/
+
+/* MySQL or old MariaDB slave with no announced capability. */
+#define MARIA_SLAVE_CAPABILITY_UNKNOWN 0
+/* MariaDB >= 5.3, which understands ANNOTATE_ROWS_EVENT. */
+#define MARIA_SLAVE_CAPABILITY_ANNOTATE 1
+/*
+ MariaDB >= 5.5. This version has the capability to tolerate events omitted
+ from the binlog stream without breaking replication (MySQL slaves fail
+ because they mis-compute the offsets into the master's binlog).
+*/
+#define MARIA_SLAVE_CAPABILITY_TOLERATE_HOLES 2
+/* MariaDB > 5.5, which knows about binlog_checkpoint_log_event. */
+#define MARIA_SLAVE_CAPABILITY_BINLOG_CHECKPOINT 3
+/*
+ MariaDB server which understands MySQL 5.6 ignorable events. This server
+ can tolerate receiving any event with the LOG_EVENT_IGNORABLE_F flag set.
+*/
+#define MARIA_SLAVE_CAPABILITY_IGNORABLE 4
+
+/* Our capability. */
+#define MARIA_SLAVE_CAPABILITY_MINE MARIA_SLAVE_CAPABILITY_BINLOG_CHECKPOINT
+
+
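A sketch of the decision this capability gates when the master sends a
binlog checkpoint event (assumed shape; the real check lives in the dump
thread code, which is not part of this hunk):

  enum { CAP_UNKNOWN= 0, CAP_ANNOTATE= 1, CAP_TOLERATE_HOLES= 2,
         CAP_BINLOG_CHECKPOINT= 3, CAP_IGNORABLE= 4 };

  static const char *checkpoint_event_action(int slave_cap)
  {
    return slave_cap >= CAP_BINLOG_CHECKPOINT
      ? "send unchanged"
      : "replace with a dummy event";  // cf. Query_log_event::dummy_event()
  }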
/**
@enum Log_event_type
@@ -648,6 +686,14 @@ enum Log_event_type
MARIA_EVENTS_BEGIN= 160,
/* New Maria event numbers start from here */
ANNOTATE_ROWS_EVENT= 160,
+ /*
+ Binlog checkpoint event. Used for XA crash recovery on the master, not used
+ in replication.
+ A binlog checkpoint event specifies a binlog file such that XA crash
+ recovery can start from that file - and it is guaranteed to find all XIDs
+ that are prepared in storage engines but not yet committed.
+ */
+ BINLOG_CHECKPOINT_EVENT= 161,
/* Add new MariaDB events here - right above this comment! */
@@ -1827,6 +1873,7 @@ public:
my_free(data_buf);
}
Log_event_type get_type_code() { return QUERY_EVENT; }
+ static int dummy_event(String *packet, ulong ev_offset, uint8 checksum_alg);
#ifdef MYSQL_SERVER
bool write(IO_CACHE* file);
virtual bool write_post_header_for_derived(IO_CACHE* file) { return FALSE; }
@@ -2854,6 +2901,32 @@ private:
};
+class Binlog_checkpoint_log_event: public Log_event
+{
+public:
+ char *binlog_file_name;
+ uint binlog_file_len;
+
+#ifdef MYSQL_SERVER
+ Binlog_checkpoint_log_event(const char *binlog_file_name_arg,
+ uint binlog_file_len_arg);
+#ifdef HAVE_REPLICATION
+ void pack_info(THD *thd, Protocol *protocol);
+#endif
+#else
+ void print(FILE *file, PRINT_EVENT_INFO *print_event_info);
+#endif
+ Binlog_checkpoint_log_event(const char *buf, uint event_len,
+ const Format_description_log_event *description_event);
+ ~Binlog_checkpoint_log_event() { my_free(binlog_file_name); }
+ Log_event_type get_type_code() { return BINLOG_CHECKPOINT_EVENT;}
+ int get_data_size() { return binlog_file_len + BINLOG_CHECKPOINT_HEADER_LEN;}
+ bool is_valid() const { return binlog_file_name != 0; }
+#ifdef MYSQL_SERVER
+ bool write(IO_CACHE* file);
+#endif
+};
+
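Per get_data_size() and write() above, the event body is a 4-byte
little-endian length followed by the file name with no trailing NUL; a
standalone encoder sketch:

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  // Models int4store() (little-endian) plus the name, as in write().
  static size_t encode_body(uint8_t *out, const char *name)
  {
    uint32_t len= (uint32_t)strlen(name);
    out[0]= len & 0xff;         out[1]= (len >> 8) & 0xff;
    out[2]= (len >> 16) & 0xff; out[3]= (len >> 24) & 0xff;
    memcpy(out + 4, name, len);
    return 4 + len;
  }

  int main()
  {
    uint8_t buf[64];
    size_t n= encode_body(buf, "master-bin.000004");
    assert(n == 4 + 17 && buf[0] == 17);
    return 0;
  }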
/* the classes below are for the new LOAD DATA INFILE logging */
/**
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
index 244999fc431..e9afe474418 100644
--- a/sql/log_event_old.cc
+++ b/sql/log_event_old.cc
@@ -927,22 +927,6 @@ int Write_rows_log_event_old::do_before_row_operations(TABLE *table)
from the start.
*/
table->file->ha_start_bulk_insert(0);
- /*
- We need TIMESTAMP_NO_AUTO_SET otherwise ha_write_row() will not use fill
- any TIMESTAMP column with data from the row but instead will use
- the event's current time.
- As we replicate from TIMESTAMP to TIMESTAMP and slave has no extra
- columns, we know that all TIMESTAMP columns on slave will receive explicit
- data from the row, so TIMESTAMP_NO_AUTO_SET is ok.
- When we allow a table without TIMESTAMP to be replicated to a table having
- more columns including a TIMESTAMP column, or when we allow a TIMESTAMP
- column to be replicated into a BIGINT column and the slave's table has a
- TIMESTAMP column, then the slave's TIMESTAMP column will take its value
- from set_time() which we called earlier (consistent with SBR). And then in
- some cases we won't want TIMESTAMP_NO_AUTO_SET (will require some code to
- analyze if explicit data is provided for slave's TIMESTAMP columns).
- */
- table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
return error;
}
@@ -1131,8 +1115,6 @@ int Update_rows_log_event_old::do_before_row_operations(TABLE *table)
if (!m_memory)
return HA_ERR_OUT_OF_MEM;
- table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
-
return error;
}
@@ -2592,22 +2574,6 @@ Write_rows_log_event_old::do_before_row_operations(const Slave_reporting_capabil
from the start.
*/
m_table->file->ha_start_bulk_insert(0);
- /*
- We need TIMESTAMP_NO_AUTO_SET otherwise ha_write_row() will not use fill
- any TIMESTAMP column with data from the row but instead will use
- the event's current time.
- As we replicate from TIMESTAMP to TIMESTAMP and slave has no extra
- columns, we know that all TIMESTAMP columns on slave will receive explicit
- data from the row, so TIMESTAMP_NO_AUTO_SET is ok.
- When we allow a table without TIMESTAMP to be replicated to a table having
- more columns including a TIMESTAMP column, or when we allow a TIMESTAMP
- column to be replicated into a BIGINT column and the slave's table has a
- TIMESTAMP column, then the slave's TIMESTAMP column will take its value
- from set_time() which we called earlier (consistent with SBR). And then in
- some cases we won't want TIMESTAMP_NO_AUTO_SET (will require some code to
- analyze if explicit data is provided for slave's TIMESTAMP columns).
- */
- m_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
return error;
}
@@ -2817,8 +2783,6 @@ Update_rows_log_event_old::do_before_row_operations(const Slave_reporting_capabi
return HA_ERR_OUT_OF_MEM;
}
- m_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
-
return 0;
}
diff --git a/sql/mdl.cc b/sql/mdl.cc
index 415d56887d5..7fd522d053a 100644
--- a/sql/mdl.cc
+++ b/sql/mdl.cc
@@ -2204,7 +2204,8 @@ bool MDL_context::acquire_locks(MDL_request_list *mdl_requests,
/* Sort requests according to MDL_key. */
if (! (sort_buf= (MDL_request **)my_malloc(req_count *
sizeof(MDL_request*),
- MYF(MY_WME))))
+ MYF(MY_WME |
+ MY_THREAD_SPECIFIC))))
DBUG_RETURN(TRUE);
for (p_req= sort_buf; p_req < sort_buf + req_count; p_req++)
diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc
index 800602fe9e1..b9f49a83b4b 100644
--- a/sql/multi_range_read.cc
+++ b/sql/multi_range_read.cc
@@ -1201,7 +1201,7 @@ bool DsMrr_impl::setup_buffer_sharing(uint key_size_in_keybuf,
uint parts= my_count_bits(key_tuple_map);
ulong rpc;
ulonglong rowids_size= rowid_buf_elem_size;
- if ((rpc= key_info->rec_per_key[parts - 1]))
+ if ((rpc= key_info->actual_rec_per_key(parts - 1)))
rowids_size= rowid_buf_elem_size * rpc;
double fraction_for_rowids=
diff --git a/sql/my_apc.cc b/sql/my_apc.cc
new file mode 100644
index 00000000000..4d0042510ae
--- /dev/null
+++ b/sql/my_apc.cc
@@ -0,0 +1,270 @@
+/*
+ Copyright (c) 2011 - 2012, Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#ifndef MY_APC_STANDALONE
+
+#include "sql_priv.h"
+#include "sql_class.h"
+
+#endif
+
+/* For standalone testing of APC system, see unittest/sql/my_apc-t.cc */
+
+/*
+ Initialize the target.
+
+ @note
+ Initialization must be done prior to enabling/disabling the target, or making
+ any call requests to it.
+ Initial state after initialization is 'disabled'.
+*/
+void Apc_target::init(mysql_mutex_t *target_mutex)
+{
+ DBUG_ASSERT(!enabled);
+ LOCK_thd_data_ptr= target_mutex;
+#ifndef DBUG_OFF
+ n_calls_processed= 0;
+#endif
+}
+
+
+/*
+ Destroy the target. The target must be disabled when this call is made.
+*/
+void Apc_target::destroy()
+{
+ DBUG_ASSERT(!enabled);
+}
+
+
+/*
+ Enter the state where the target is available for serving APC requests.
+*/
+void Apc_target::enable()
+{
+ /* Ok to do without getting/releasing the mutex: */
+ enabled++;
+}
+
+
+/*
+ Make the target unavailable for serving APC requests.
+
+ @note
+ This call will serve all requests that were already enqueued.
+*/
+
+void Apc_target::disable()
+{
+ bool process= FALSE;
+ mysql_mutex_lock(LOCK_thd_data_ptr);
+ if (!(--enabled))
+ process= TRUE;
+ mysql_mutex_unlock(LOCK_thd_data_ptr);
+ if (process)
+ process_apc_requests();
+}
+
+
+/* [internal] Put request qe into the request list */
+
+void Apc_target::enqueue_request(Call_request *qe)
+{
+ mysql_mutex_assert_owner(LOCK_thd_data_ptr);
+ if (apc_calls)
+ {
+ Call_request *after= apc_calls->prev;
+ qe->next= apc_calls;
+ apc_calls->prev= qe;
+
+ qe->prev= after;
+ after->next= qe;
+ }
+ else
+ {
+ apc_calls= qe;
+ qe->next= qe->prev= qe;
+ }
+}
+
+
+/*
+ [internal] Remove request qe from the request queue.
+
+ The request is not necessarily first in the queue.
+*/
+
+void Apc_target::dequeue_request(Call_request *qe)
+{
+ mysql_mutex_assert_owner(LOCK_thd_data_ptr);
+ if (apc_calls == qe)
+ {
+ if ((apc_calls= apc_calls->next) == qe)
+ {
+ apc_calls= NULL;
+ }
+ }
+
+ qe->prev->next= qe->next;
+ qe->next->prev= qe->prev;
+}
+
+#ifdef HAVE_PSI_INTERFACE
+
+/* One key for all conds */
+PSI_cond_key key_show_explain_request_COND;
+
+static PSI_cond_info show_explain_psi_conds[]=
+{
+ { &key_show_explain_request_COND, "show_explain", 0 /* not using PSI_FLAG_GLOBAL*/ }
+};
+
+void init_show_explain_psi_keys(void)
+{
+ if (PSI_server == NULL)
+ return;
+
+ PSI_server->register_cond("sql", show_explain_psi_conds,
+ array_elements(show_explain_psi_conds));
+}
+#endif
+
+
+/*
+ Make an APC (Async Procedure Call) to another thread.
+
+ @detail
+ Make an APC call: schedule it for execution and wait until the target
+ thread has executed it.
+
+ - The caller is responsible for making sure it is not posting a request
+ to the thread it is calling this function from.
+
+ - The caller must have locked target_mutex. The function will release it.
+
+ @retval FALSE - Ok, the call has been made
+ @retval TRUE - Call wasn't made (either the target is in the disabled
+ state or a timeout occurred)
+*/
+
+bool Apc_target::make_apc_call(THD *caller_thd, Apc_call *call,
+ int timeout_sec, bool *timed_out)
+{
+ bool res= TRUE;
+ *timed_out= FALSE;
+
+ if (enabled)
+ {
+ /* Create and post the request */
+ Call_request apc_request;
+ apc_request.call= call;
+ apc_request.processed= FALSE;
+ mysql_cond_init(key_show_explain_request_COND, &apc_request.COND_request,
+ NULL);
+ enqueue_request(&apc_request);
+ apc_request.what="enqueued by make_apc_call";
+
+ struct timespec abstime;
+ const int timeout= timeout_sec;
+ set_timespec(abstime, timeout);
+
+ int wait_res= 0;
+ const char *old_msg;
+ old_msg= caller_thd->enter_cond(&apc_request.COND_request,
+ LOCK_thd_data_ptr, "show_explain");
+ /* todo: how about processing other errors here? */
+ while (!apc_request.processed && (wait_res != ETIMEDOUT))
+ {
+ /* We own LOCK_thd_data_ptr */
+ wait_res= mysql_cond_timedwait(&apc_request.COND_request,
+ LOCK_thd_data_ptr, &abstime);
+ if (caller_thd->killed)
+ break;
+ }
+
+ if (!apc_request.processed)
+ {
+ /*
+ The wait has timed out, or this thread was KILLed.
+ Remove the request from the queue (ok to do because we own
+ LOCK_thd_data_ptr)
+ */
+ apc_request.processed= TRUE;
+ dequeue_request(&apc_request);
+ *timed_out= TRUE;
+ res= TRUE;
+ }
+ else
+ {
+ /* Request was successfully executed and dequeued by the target thread */
+ res= FALSE;
+ }
+ /*
+ exit_cond() will call mysql_mutex_unlock(LOCK_thd_data_ptr) for us:
+ */
+ caller_thd->exit_cond(old_msg);
+
+ /* Destroy all APC request data */
+ mysql_cond_destroy(&apc_request.COND_request);
+ }
+ else
+ {
+ mysql_mutex_unlock(LOCK_thd_data_ptr);
+ }
+ return res;
+}
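
A hedged caller-side sketch; the functor and variable names below are illustrative, not defined by this patch:

    class My_request : public Apc_target::Apc_call
    {
    public:
      THD *target_thd;                     /* thread to be inspected */
      void call_in_target_thread()         /* runs inside the target thread */
      {
        /* gather data from target_thd here; the target is parked in
           process_apc_requests() for the duration of this call */
      }
    };

    /* Caller side (simplified):
         My_request req;
         bool timed_out;
         mysql_mutex_lock(&target_thd->LOCK_thd_data);  // released by callee
         if (apc_target->make_apc_call(thd, &req, timeout_sec, &timed_out))
           ... not served: target disabled, timeout, or caller was killed
    */
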
+
+
+/*
+ Process all APC requests.
+ This should be called periodically by the APC target thread.
+*/
+
+void Apc_target::process_apc_requests()
+{
+ while (1)
+ {
+ Call_request *request;
+
+ mysql_mutex_lock(LOCK_thd_data_ptr);
+ if (!(request= get_first_in_queue()))
+ {
+ /* No requests in the queue */
+ mysql_mutex_unlock(LOCK_thd_data_ptr);
+ break;
+ }
+
+ /*
+ Remove the request from the queue (we're holding the queue lock, so we can
+ be sure that the request's owner won't try to remove it)
+ */
+ request->what="dequeued by process_apc_requests";
+ dequeue_request(request);
+ request->processed= TRUE;
+
+ request->call->call_in_target_thread();
+ request->what="func called by process_apc_requests";
+
+#ifndef DBUG_OFF
+ n_calls_processed++;
+#endif
+ mysql_cond_signal(&request->COND_request);
+ mysql_mutex_unlock(LOCK_thd_data_ptr);
+ }
+}
+
diff --git a/sql/my_apc.h b/sql/my_apc.h
new file mode 100644
index 00000000000..7f19809c082
--- /dev/null
+++ b/sql/my_apc.h
@@ -0,0 +1,138 @@
+#ifndef INCLUDES_MY_APC_H
+#define INCLUDES_MY_APC_H
+/*
+ Copyright (c) 2011 - 2012, Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+ Interface
+ ~~~~~~~~~
+ - This is an APC request queue.
+ - We assume there is a particular owner thread which periodically calls
+ process_apc_requests() to serve the call requests.
+ - Other threads can post call requests and block until they are executed.
+
+ Implementation
+ ~~~~~~~~~~~~~~
+ - The target has a mutex-guarded request queue.
+
+ - After the request has been put into the queue, the requestor waits for
+ the request to be satisfied. The worker satisfies the request and signals
+ the requestor.
+*/
+
+class THD;
+
+/*
+ Target for asynchronous procedure calls (APCs).
+ - A target is running in some particular thread,
+ - One can make calls to it from other threads.
+*/
+class Apc_target
+{
+ mysql_mutex_t *LOCK_thd_data_ptr;
+public:
+ Apc_target() : enabled(0), apc_calls(NULL) {}
+ ~Apc_target() { DBUG_ASSERT(!enabled && !apc_calls);}
+
+ void init(mysql_mutex_t *target_mutex);
+ void destroy();
+ void enable();
+ void disable();
+
+ void process_apc_requests();
+ /*
+ A lightweight function, intended to be used in frequent checks like this:
+
+ if (apc_target.have_apc_requests()) apc_target.process_apc_requests();
+ */
+ inline bool have_apc_requests()
+ {
+ return test(apc_calls);
+ }
+
+ /* Functor class for calls you can schedule */
+ class Apc_call
+ {
+ public:
+ /* This function will be called in the target thread */
+ virtual void call_in_target_thread()= 0;
+ virtual ~Apc_call() {}
+ };
+
+ /* Make a call in the target thread (see function definition for details) */
+ bool make_apc_call(THD *caller_thd, Apc_call *call, int timeout_sec, bool *timed_out);
+
+#ifndef DBUG_OFF
+ int n_calls_processed; /* Number of calls served by this target */
+#endif
+private:
+ class Call_request;
+
+ /*
+ Non-zero value means we're enabled. It's an int, not bool, because one can
+ call enable() N times (and then needs to call disable() N times before the
+ target is really disabled)
+ */
+ int enabled;
+
+ /*
+ Circular, doubly-linked list of all enqueued call requests.
+ We use this structure because:
+ - requests are processed sequentially: they are added at the end of the
+ list and removed from the front. With a circular list, we can keep one
+ pointer and access both the front and the back of the list with it;
+ - a thread that has posted a request may time out (or be KILLed) and
+ cancel the request, which means we need a fast request-removal
+ operation.
+ */
+ Call_request *apc_calls;
+
+ class Call_request
+ {
+ public:
+ Apc_call *call; /* Functor to be called */
+
+ /* The caller will actually wait for "processed==TRUE" */
+ bool processed;
+
+ /* Condition that will be signalled when the request has been served */
+ mysql_cond_t COND_request;
+
+ /* Doubly-linked list linkage */
+ Call_request *next;
+ Call_request *prev;
+
+ const char *what; /* (debug) state of the request */
+ };
+
+ void enqueue_request(Call_request *qe);
+ void dequeue_request(Call_request *qe);
+
+ /* Return the first call request in the queue, or NULL if none are enqueued */
+ Call_request *get_first_in_queue()
+ {
+ return apc_calls;
+ }
+};
+
+#ifdef HAVE_PSI_INTERFACE
+void init_show_explain_psi_keys(void);
+#endif
+
+#endif //INCLUDES_MY_APC_H
+
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 14737cd69bb..202cabaec58 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -52,6 +52,7 @@
#include "des_key_file.h" // load_des_key_file
#include "sql_manager.h" // stop_handle_manager, start_handle_manager
#include "sql_expression_cache.h" // subquery_cache_miss, subquery_cache_hit
+#include "sys_vars_shared.h"
#include <m_ctype.h>
#include <my_dir.h>
@@ -471,7 +472,7 @@ ulong back_log, connect_timeout, concurrency, server_id;
ulong table_cache_size, table_def_size;
ulong what_to_log;
ulong slow_launch_time, slave_open_temp_tables;
-ulong open_files_limit, max_binlog_size, max_relay_log_size;
+ulong open_files_limit, max_binlog_size;
ulong slave_trans_retries;
uint slave_net_timeout;
ulong slave_exec_mode_options;
@@ -488,6 +489,7 @@ ulong executed_events=0;
query_id_t global_query_id;
my_atomic_rwlock_t global_query_id_lock;
my_atomic_rwlock_t thread_running_lock;
+my_atomic_rwlock_t statistics_lock;
ulong aborted_threads, aborted_connects;
ulong delayed_insert_timeout, delayed_insert_limit, delayed_queue_size;
ulong delayed_insert_threads, delayed_insert_writes, delayed_rows_in_use;
@@ -497,6 +499,7 @@ ulong binlog_cache_use= 0, binlog_cache_disk_use= 0;
ulong binlog_stmt_cache_use= 0, binlog_stmt_cache_disk_use= 0;
ulong max_connections, max_connect_errors;
ulong extra_max_connections;
+ulong slave_retried_transactions;
ulonglong denied_connections;
my_decimal decimal_zero;
@@ -654,6 +657,7 @@ SHOW_COMP_OPTION have_ssl, have_symlink, have_dlopen, have_query_cache;
SHOW_COMP_OPTION have_geometry, have_rtree_keys;
SHOW_COMP_OPTION have_crypt, have_compress;
SHOW_COMP_OPTION have_profiling;
+SHOW_COMP_OPTION have_openssl;
/* Thread specific variables */
@@ -690,8 +694,7 @@ pthread_attr_t connection_attrib;
mysql_mutex_t LOCK_server_started;
mysql_cond_t COND_server_started;
-int mysqld_server_started= 0;
-
+int mysqld_server_started= 0, mysqld_server_initialized= 0;
File_parser_dummy_hook file_parser_dummy_hook;
/* replication parameters, if master_host is not NULL, we are a slave */
@@ -733,14 +736,16 @@ char **orig_argv;
#ifdef HAVE_PSI_INTERFACE
#ifdef HAVE_MMAP
-PSI_mutex_key key_PAGE_lock, key_LOCK_sync, key_LOCK_active, key_LOCK_pool;
+PSI_mutex_key key_PAGE_lock, key_LOCK_sync, key_LOCK_active, key_LOCK_pool,
+ key_LOCK_pending_checkpoint;
#endif /* HAVE_MMAP */
#ifdef HAVE_OPENSSL
PSI_mutex_key key_LOCK_des_key_file;
#endif /* HAVE_OPENSSL */
-PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_prep_xids,
+PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list,
+ key_BINLOG_LOCK_binlog_background_thread,
key_delayed_insert_mutex, key_hash_filo_lock, key_LOCK_active_mi,
key_LOCK_connection_count, key_LOCK_crypt, key_LOCK_delayed_create,
key_LOCK_delayed_insert, key_LOCK_delayed_status, key_LOCK_error_log,
@@ -774,6 +779,7 @@ static PSI_mutex_info all_server_mutexes[]=
{ &key_LOCK_sync, "TC_LOG_MMAP::LOCK_sync", 0},
{ &key_LOCK_active, "TC_LOG_MMAP::LOCK_active", 0},
{ &key_LOCK_pool, "TC_LOG_MMAP::LOCK_pool", 0},
+ { &key_LOCK_pending_checkpoint, "TC_LOG_MMAP::LOCK_pending_checkpoint", 0},
#endif /* HAVE_MMAP */
#ifdef HAVE_OPENSSL
@@ -781,7 +787,8 @@ static PSI_mutex_info all_server_mutexes[]=
#endif /* HAVE_OPENSSL */
{ &key_BINLOG_LOCK_index, "MYSQL_BIN_LOG::LOCK_index", 0},
- { &key_BINLOG_LOCK_prep_xids, "MYSQL_BIN_LOG::LOCK_prep_xids", 0},
+ { &key_BINLOG_LOCK_xid_list, "MYSQL_BIN_LOG::LOCK_xid_list", 0},
+ { &key_BINLOG_LOCK_binlog_background_thread, "MYSQL_BIN_LOG::LOCK_binlog_background_thread", 0},
{ &key_RELAYLOG_LOCK_index, "MYSQL_RELAY_LOG::LOCK_index", 0},
{ &key_delayed_insert_mutex, "Delayed_insert::mutex", 0},
{ &key_hash_filo_lock, "hash_filo::lock", 0},
@@ -849,7 +856,9 @@ static PSI_rwlock_info all_server_rwlocks[]=
PSI_cond_key key_PAGE_cond, key_COND_active, key_COND_pool;
#endif /* HAVE_MMAP */
-PSI_cond_key key_BINLOG_COND_prep_xids, key_BINLOG_update_cond,
+PSI_cond_key key_BINLOG_COND_xid_list, key_BINLOG_update_cond,
+ key_BINLOG_COND_binlog_background_thread,
+ key_BINLOG_COND_binlog_background_thread_end,
key_COND_cache_status_changed, key_COND_manager,
key_COND_rpl_status, key_COND_server_started,
key_delayed_insert_cond, key_delayed_insert_cond_client,
@@ -877,8 +886,10 @@ static PSI_cond_info all_server_conds[]=
{ &key_COND_pool, "TC_LOG_MMAP::COND_pool", 0},
{ &key_TC_LOG_MMAP_COND_queue_busy, "TC_LOG_MMAP::COND_queue_busy", 0},
#endif /* HAVE_MMAP */
- { &key_BINLOG_COND_prep_xids, "MYSQL_BIN_LOG::COND_prep_xids", 0},
+ { &key_BINLOG_COND_xid_list, "MYSQL_BIN_LOG::COND_xid_list", 0},
{ &key_BINLOG_update_cond, "MYSQL_BIN_LOG::update_cond", 0},
+ { &key_BINLOG_COND_binlog_background_thread, "MYSQL_BIN_LOG::COND_binlog_background_thread", 0},
+ { &key_BINLOG_COND_binlog_background_thread_end, "MYSQL_BIN_LOG::COND_binlog_background_thread_end", 0},
{ &key_BINLOG_COND_queue_busy, "MYSQL_BIN_LOG::COND_queue_busy", 0},
{ &key_RELAYLOG_update_cond, "MYSQL_RELAY_LOG::update_cond", 0},
{ &key_RELAYLOG_COND_queue_busy, "MYSQL_RELAY_LOG::COND_queue_busy", 0},
@@ -1110,7 +1121,7 @@ private:
void Buffered_logs::init()
{
- init_alloc_root(&m_root, 1024, 0);
+ init_alloc_root(&m_root, 1024, 0, MYF(0));
}
void Buffered_logs::cleanup()
@@ -1754,6 +1765,7 @@ extern "C" void unireg_abort(int exit_code)
static void mysqld_exit(int exit_code)
{
+ DBUG_ENTER("mysqld_exit");
/*
Important note: we wait for the signal thread to end,
but if a kill -15 signal was sent, the signal thread did
@@ -1765,6 +1777,7 @@ static void mysqld_exit(int exit_code)
clean_up_error_log_mutex();
my_end((opt_endinfo ? MY_CHECK_ERROR | MY_GIVE_INFO : 0));
shutdown_performance_schema(); // we do it as late as possible
+ DBUG_LEAVE;
exit(exit_code); /* purecov: inspected */
}
@@ -1776,7 +1789,12 @@ void clean_up(bool print_message)
if (cleanup_done++)
return; /* purecov: inspected */
- close_active_mi();
+#ifdef HAVE_REPLICATION
+ // We must call end_slave() as clean_up may have been called during startup
+ end_slave();
+ if (use_slave_mask)
+ bitmap_free(&slave_error_mask);
+#endif
stop_handle_manager();
release_ddl_log();
@@ -1791,10 +1809,6 @@ void clean_up(bool print_message)
injector::free_instance();
mysql_bin_log.cleanup();
-#ifdef HAVE_REPLICATION
- if (use_slave_mask)
- bitmap_free(&slave_error_mask);
-#endif
my_tz_free();
my_dboptions_cache_free();
ignore_db_dirs_free();
@@ -1872,6 +1886,7 @@ void clean_up(bool print_message)
sys_var_end();
my_atomic_rwlock_destroy(&global_query_id_lock);
my_atomic_rwlock_destroy(&thread_running_lock);
+ my_atomic_rwlock_destroy(&statistics_lock);
mysql_mutex_lock(&LOCK_thread_count);
DBUG_PRINT("quit", ("got thread count lock"));
ready_to_exit=1;
@@ -2472,9 +2487,13 @@ void unlink_thd(THD *thd)
sync feature has been shut down at this point.
*/
DBUG_EXECUTE_IF("sleep_after_lock_thread_count_before_delete_thd", sleep(5););
+ /*
+ We must delete thd while holding the lock to ensure that cleanup cannot
+ start before the THD has been deleted
+ */
+ delete thd;
mysql_mutex_unlock(&LOCK_thread_count);
- delete thd;
DBUG_VOID_RETURN;
}
@@ -2581,7 +2600,7 @@ bool one_thread_per_connection_end(THD *thd, bool put_in_cache)
DBUG_ENTER("one_thread_per_connection_end");
unlink_thd(thd);
/* Mark that current_thd is not valid anymore */
- my_pthread_setspecific_ptr(THR_THD, 0);
+ set_current_thd(0);
if (put_in_cache)
{
mysql_mutex_lock(&LOCK_thread_count);
@@ -2593,9 +2612,10 @@ bool one_thread_per_connection_end(THD *thd, bool put_in_cache)
/* It's safe to broadcast outside a lock (COND... is not deleted here) */
DBUG_PRINT("signal", ("Broadcasting COND_thread_count"));
+ mysql_cond_broadcast(&COND_thread_count);
+
DBUG_LEAVE; // Must match DBUG_ENTER()
my_thread_end();
- mysql_cond_broadcast(&COND_thread_count);
pthread_exit(0);
return 0; // Avoid compiler warnings
@@ -3129,9 +3149,9 @@ void my_message_sql(uint error, const char *str, myf MyFlags)
THD *thd= current_thd;
MYSQL_ERROR::enum_warning_level level;
sql_print_message_func func;
-
DBUG_ENTER("my_message_sql");
- DBUG_PRINT("error", ("error: %u message: '%s' Flag: %d", error, str, MyFlags));
+ DBUG_PRINT("error", ("error: %u message: '%s' Flag: %lu", error, str,
+ MyFlags));
DBUG_ASSERT(str != NULL);
DBUG_ASSERT(error != 0);
@@ -3360,6 +3380,7 @@ SHOW_VAR com_status_vars[]= {
{"show_engine_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_STATUS]), SHOW_LONG_STATUS},
{"show_events", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_EVENTS]), SHOW_LONG_STATUS},
{"show_errors", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ERRORS]), SHOW_LONG_STATUS},
+ {"show_explain", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_EXPLAIN]), SHOW_LONG_STATUS},
{"show_fields", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_FIELDS]), SHOW_LONG_STATUS},
#ifndef DBUG_OFF
{"show_function_code", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_FUNC_CODE]), SHOW_LONG_STATUS},
@@ -3391,8 +3412,8 @@ SHOW_VAR com_status_vars[]= {
{"show_user_statistics", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_USER_STATS]), SHOW_LONG_STATUS},
{"show_variables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_VARIABLES]), SHOW_LONG_STATUS},
{"show_warnings", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_WARNS]), SHOW_LONG_STATUS},
- {"slave_start", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_START]), SHOW_LONG_STATUS},
- {"slave_stop", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_STOP]), SHOW_LONG_STATUS},
+ {"start_all_slaves", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_ALL_START]), SHOW_LONG_STATUS},
+ {"start_slave", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_START]), SHOW_LONG_STATUS},
{"stmt_close", (char*) offsetof(STATUS_VAR, com_stmt_close), SHOW_LONG_STATUS},
{"stmt_execute", (char*) offsetof(STATUS_VAR, com_stmt_execute), SHOW_LONG_STATUS},
{"stmt_fetch", (char*) offsetof(STATUS_VAR, com_stmt_fetch), SHOW_LONG_STATUS},
@@ -3400,6 +3421,8 @@ SHOW_VAR com_status_vars[]= {
{"stmt_reprepare", (char*) offsetof(STATUS_VAR, com_stmt_reprepare), SHOW_LONG_STATUS},
{"stmt_reset", (char*) offsetof(STATUS_VAR, com_stmt_reset), SHOW_LONG_STATUS},
{"stmt_send_long_data", (char*) offsetof(STATUS_VAR, com_stmt_send_long_data), SHOW_LONG_STATUS},
+ {"stop_all_slaves", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_ALL_STOP]), SHOW_LONG_STATUS},
+ {"stop_slave", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_STOP]), SHOW_LONG_STATUS},
{"truncate", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_TRUNCATE]), SHOW_LONG_STATUS},
{"uninstall_plugin", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UNINSTALL_PLUGIN]), SHOW_LONG_STATUS},
{"unlock_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UNLOCK_TABLES]), SHOW_LONG_STATUS},
@@ -3414,14 +3437,73 @@ SHOW_VAR com_status_vars[]= {
{NullS, NullS, SHOW_LONG}
};
+#ifdef SAFEMALLOC
+/*
+ Return the id for the current THD, to allow safemalloc to associate
+ the memory with the right id.
+*/
+
+extern "C" my_thread_id mariadb_dbug_id()
+{
+ THD *thd;
+ if ((thd= current_thd))
+ {
+ return thd->thread_id;
+ }
+ return my_thread_dbug_id();
+}
+#endif /* SAFEMALLOC */
+
+/* Thread Mem Usage By P.Linux */
+extern "C" {
+static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific)
+{
+ /* If thread specific memory */
+ if (is_thread_specific)
+ {
+ THD *thd= current_thd;
+ if (mysqld_server_initialized || thd)
+ {
+ /*
+ THD may not be set if we are called from my_net_init() before the
+ THD's thread has started.
+ However, this should never happen, so it is better to assert and
+ fix it.
+ */
+ DBUG_ASSERT(thd);
+ if (thd)
+ {
+ DBUG_PRINT("info", ("memory_used: %lld size: %lld",
+ (longlong) thd->status_var.memory_used, size));
+ thd->status_var.memory_used+= size;
+ DBUG_ASSERT((longlong) thd->status_var.memory_used >= 0);
+ }
+ }
+ }
+ // workaround for gcc 4.2.4-1ubuntu4 -fPIE (from DEB_BUILD_HARDENING=1)
+ int64 volatile * volatile ptr=&global_status_var.memory_used;
+ my_atomic_add64(ptr, size);
+}
+}
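
A short sketch of how allocation sites elsewhere in this patch opt in to the callback (the flag usage matches opt_range.cc and net_serv.cc below; `size` is an arbitrary byte count):

    /* Thread-specific: my_malloc_size_cb_func(size, TRUE) is invoked and the
       bytes are charged to current_thd->status_var.memory_used: */
    uchar *buf= (uchar*) my_malloc(size, MYF(MY_WME | MY_THREAD_SPECIFIC));

    /* Plain allocation: the callback still runs, but with
       is_thread_specific == FALSE it only updates the global counter
       global_status_var.memory_used via an atomic add: */
    uchar *tmp= (uchar*) my_malloc(size, MYF(MY_WME));
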
+
+
+/*
+ Init common variables
+*/
+
static int init_common_variables()
{
umask(((~my_umask) & 0666));
my_decimal_set_zero(&decimal_zero); // set decimal_zero constant;
+ set_malloc_size_cb(my_malloc_size_cb_func);
+
tzset(); // Set tzname
sf_leaking_memory= 0; // no memory leaks from now on
+#ifdef SAFEMALLOC
+ sf_malloc_dbug_id= mariadb_dbug_id;
+#endif
max_system_variables.pseudo_thread_id= (ulong)~0;
server_start_time= flush_status_time= my_time(0);
@@ -3881,6 +3963,7 @@ You should consider changing lower_case_table_names to 1 or 2",
static int init_thread_environment()
{
+ DBUG_ENTER("init_thread_environment");
mysql_mutex_init(key_LOCK_thread_count, &LOCK_thread_count, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_status, &LOCK_status, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_delayed_insert,
@@ -3948,6 +4031,7 @@ static int init_thread_environment()
#ifdef HAVE_EVENT_SCHEDULER
Events::init_mutexes();
#endif
+ init_show_explain_psi_keys();
/* Parameter for threads created for connections */
(void) pthread_attr_init(&connection_attrib);
(void) pthread_attr_setdetachstate(&connection_attrib,
@@ -3958,9 +4042,9 @@ static int init_thread_environment()
pthread_key_create(&THR_MALLOC,NULL))
{
sql_print_error("Can't create thread-keys");
- return 1;
+ DBUG_RETURN(1);
}
- return 0;
+ DBUG_RETURN(0);
}
@@ -4450,7 +4534,7 @@ a file name for --log-bin-index option", opt_binlog_index_name);
}
if (opt_bin_log && mysql_bin_log.open(opt_bin_logname, LOG_BIN, 0,
- WRITE_CACHE, 0, max_binlog_size, 0, TRUE))
+ WRITE_CACHE, max_binlog_size, 0, TRUE))
unireg_abort(1);
#ifdef HAVE_REPLICATION
@@ -4624,6 +4708,7 @@ static void test_lc_time_sz()
}
#endif//DBUG_OFF
+
#ifdef __WIN__
int win_main(int argc, char **argv)
#else
@@ -4636,6 +4721,8 @@ int mysqld_main(int argc, char **argv)
*/
my_progname= argv[0];
sf_leaking_memory= 1; // no safemalloc memory leak reports if we exit early
+ mysqld_server_started= mysqld_server_initialized= 0;
+
#ifdef HAVE_NPTL
ld_assume_kernel_is_set= (getenv("LD_ASSUME_KERNEL") != 0);
#endif
@@ -4677,7 +4764,7 @@ int mysqld_main(int argc, char **argv)
my_getopt_skip_unknown= TRUE;
/* prepare all_early_options array */
- my_init_dynamic_array(&all_early_options, sizeof(my_option), 100, 25);
+ my_init_dynamic_array(&all_early_options, sizeof(my_option), 100, 25, MYF(0));
sys_var_add_options(&all_early_options, sys_var::PARSE_EARLY);
add_terminator(&all_early_options);
@@ -4912,16 +4999,6 @@ int mysqld_main(int argc, char **argv)
opt_skip_slave_start= 1;
binlog_unsafe_map_init();
- /*
- init_slave() must be called after the thread keys are created.
- Some parts of the code (e.g. SHOW STATUS LIKE 'slave_running' and other
- places) assume that active_mi != 0, so let's fail if it's 0 (out of
- memory); a message has already been printed.
- */
- if (init_slave() && !active_mi)
- {
- unireg_abort(1);
- }
#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
initialize_performance_schema_acl(opt_bootstrap);
@@ -4939,6 +5016,12 @@ int mysqld_main(int argc, char **argv)
execute_ddl_log_recovery();
+ /*
+ We must have LOCK_open before LOCK_global_system_variables because
+ LOCK_open is held while sql_plugin.c::intern_sys_var_ptr() is called.
+ */
+ mysql_mutex_record_order(&LOCK_open, &LOCK_global_system_variables);
+
if (Events::init(opt_noacl || opt_bootstrap))
unireg_abort(1);
@@ -4954,21 +5037,30 @@ int mysqld_main(int argc, char **argv)
exit(0);
}
}
+
+ /* It's now safe to use thread specific memory */
+ mysqld_server_initialized= 1;
+
+ create_shutdown_thread();
+ start_handle_manager();
+
+ /*
+ init_slave() must be called after the thread keys are created.
+ Some parts of the code (e.g. SHOW STATUS LIKE 'slave_running' and other
+ places) assume that active_mi != 0, so let's fail if it's 0 (out of
+ memory); a message has already been printed.
+ */
+ if (init_slave() && !active_mi)
+ {
+ unireg_abort(1);
+ }
+
if (opt_init_file && *opt_init_file)
{
if (read_init_file(opt_init_file))
unireg_abort(1);
}
- /*
- We must have LOCK_open before LOCK_global_system_variables because
- LOCK_open is hold while sql_plugin.c::intern_sys_var_ptr() is called.
- */
- mysql_mutex_record_order(&LOCK_open, &LOCK_global_system_variables);
-
- create_shutdown_thread();
- start_handle_manager();
-
sql_print_information(ER_DEFAULT(ER_STARTUP),my_progname,server_version,
((unix_sock == INVALID_SOCKET) ? (char*) ""
: mysqld_unix_port),
@@ -4978,7 +5070,6 @@ int mysqld_main(int argc, char **argv)
Service.SetRunning();
#endif
-
/* Signal threads waiting for server to be started */
mysql_mutex_lock(&LOCK_server_started);
mysqld_server_started= 1;
@@ -5266,7 +5357,7 @@ static void bootstrap(MYSQL_FILE *file)
THD *thd= new THD;
thd->bootstrap=1;
- my_net_init(&thd->net,(st_vio*) 0);
+ my_net_init(&thd->net,(st_vio*) 0, MYF(0));
thd->max_client_packet_length= thd->net.max_packet;
thd->security_ctx->master_access= ~(ulong)0;
thd->thread_id= thd->variables.pseudo_thread_id= thread_id++;
@@ -5683,17 +5774,20 @@ void handle_connections_sockets()
** Don't allow too many connections
*/
+ DBUG_PRINT("info", ("Creating THD for new connection"));
if (!(thd= new THD))
{
(void) mysql_socket_shutdown(new_sock, SHUT_RDWR);
(void) closesocket(new_sock);
continue;
}
+ /* Set current_thd so that the I/O buffers are allocated as part of the THD */
+ set_current_thd(thd);
if (!(vio_tmp=vio_new(new_sock,
sock == unix_sock ? VIO_TYPE_SOCKET :
VIO_TYPE_TCPIP,
sock == unix_sock ? VIO_LOCALHOST: 0)) ||
- my_net_init(&thd->net,vio_tmp))
+ my_net_init(&thd->net, vio_tmp, MYF(MY_THREAD_SPECIFIC)))
{
/*
Only delete the temporary vio if we didn't already attach it to the
@@ -5708,6 +5802,7 @@ void handle_connections_sockets()
(void) closesocket(new_sock);
}
delete thd;
+ set_current_thd(0);
continue;
}
if (sock == unix_sock)
@@ -5719,6 +5814,7 @@ void handle_connections_sockets()
thd->scheduler= extra_thread_scheduler;
}
create_new_thread(thd);
+ set_current_thd(0);
}
DBUG_VOID_RETURN;
}
@@ -5812,16 +5908,19 @@ pthread_handler_t handle_connections_namedpipes(void *arg)
CloseHandle(hConnectedPipe);
continue;
}
+ set_current_thd(thd);
if (!(thd->net.vio= vio_new_win32pipe(hConnectedPipe)) ||
- my_net_init(&thd->net, thd->net.vio))
+ my_net_init(&thd->net, thd->net.vio, MYF(MY_THREAD_SPECIFIC)))
{
close_connection(thd, ER_OUT_OF_RESOURCES);
delete thd;
+ set_current_thd(0);
continue;
}
/* Host is unknown */
thd->security_ctx->host= my_strdup(my_localhost, MYF(0));
create_new_thread(thd);
+ set_current_thd(0);
}
CloseHandle(connectOverlapped.hEvent);
DBUG_LEAVE;
@@ -6001,6 +6100,7 @@ pthread_handler_t handle_connections_shared_memory(void *arg)
errmsg= "Could not set client to read mode";
goto errorconn;
}
+ set_current_thd(thd);
if (!(thd->net.vio= vio_new_win32shared_memory(handle_client_file_map,
handle_client_map,
event_client_wrote,
@@ -6008,7 +6108,7 @@ pthread_handler_t handle_connections_shared_memory(void *arg)
event_server_wrote,
event_server_read,
event_conn_closed)) ||
- my_net_init(&thd->net, thd->net.vio))
+ my_net_init(&thd->net, thd->net.vio, MYF(MY_THREAD_SPECIFIC)))
{
close_connection(thd, ER_OUT_OF_RESOURCES);
errmsg= 0;
@@ -6017,6 +6117,7 @@ pthread_handler_t handle_connections_shared_memory(void *arg)
thd->security_ctx->host= my_strdup(my_localhost, MYF(0)); /* Host is unknown */
create_new_thread(thd);
connect_number++;
+ set_current_thd(0);
continue;
errorconn:
@@ -6044,6 +6145,7 @@ errorconn:
CloseHandle(event_conn_closed);
delete thd;
}
+ set_current_thd(0);
/* End shared memory handling */
error:
@@ -6081,6 +6183,7 @@ error:
*/
struct my_option my_long_options[]=
+
{
{"help", '?', "Display this help and exit.",
&opt_help, &opt_help, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
@@ -6546,63 +6649,53 @@ static int show_rpl_status(THD *thd, SHOW_VAR *var, char *buff)
static int show_slave_running(THD *thd, SHOW_VAR *var, char *buff)
{
+ Master_info *mi;
var->type= SHOW_MY_BOOL;
- mysql_mutex_lock(&LOCK_active_mi);
var->value= buff;
- *((my_bool *)buff)= (my_bool) (active_mi &&
- active_mi->slave_running == MYSQL_SLAVE_RUN_CONNECT &&
- active_mi->rli.slave_running);
- mysql_mutex_unlock(&LOCK_active_mi);
- return 0;
-}
-
-static int show_slave_retried_trans(THD *thd, SHOW_VAR *var, char *buff)
-{
- /*
- TODO: with multimaster, have one such counter per line in
- SHOW SLAVE STATUS, and have the sum over all lines here.
- */
mysql_mutex_lock(&LOCK_active_mi);
- if (active_mi)
- {
- var->type= SHOW_LONG;
- var->value= buff;
- mysql_mutex_lock(&active_mi->rli.data_lock);
- *((long *)buff)= (long)active_mi->rli.retried_trans;
- mysql_mutex_unlock(&active_mi->rli.data_lock);
- }
+ mi= master_info_index->
+ get_master_info(&thd->variables.default_master_connection,
+ MYSQL_ERROR::WARN_LEVEL_NOTE);
+ if (mi)
+ *((my_bool *)buff)= (my_bool) (mi->slave_running ==
+ MYSQL_SLAVE_RUN_CONNECT &&
+ mi->rli.slave_running);
else
var->type= SHOW_UNDEF;
mysql_mutex_unlock(&LOCK_active_mi);
return 0;
}
+
static int show_slave_received_heartbeats(THD *thd, SHOW_VAR *var, char *buff)
{
+ Master_info *mi;
+ var->type= SHOW_LONGLONG;
+ var->value= buff;
mysql_mutex_lock(&LOCK_active_mi);
- if (active_mi)
- {
- var->type= SHOW_LONGLONG;
- var->value= buff;
- mysql_mutex_lock(&active_mi->rli.data_lock);
- *((longlong *)buff)= active_mi->received_heartbeats;
- mysql_mutex_unlock(&active_mi->rli.data_lock);
- }
+ mi= master_info_index->
+ get_master_info(&thd->variables.default_master_connection,
+ MYSQL_ERROR::WARN_LEVEL_NOTE);
+ if (mi)
+ *((longlong *)buff)= mi->received_heartbeats;
else
var->type= SHOW_UNDEF;
mysql_mutex_unlock(&LOCK_active_mi);
return 0;
}
+
static int show_heartbeat_period(THD *thd, SHOW_VAR *var, char *buff)
{
+ Master_info *mi;
+ var->type= SHOW_CHAR;
+ var->value= buff;
mysql_mutex_lock(&LOCK_active_mi);
- if (active_mi)
- {
- var->type= SHOW_CHAR;
- var->value= buff;
- sprintf(buff, "%.3f", active_mi->heartbeat_period);
- }
+ mi= master_info_index->
+ get_master_info(&thd->variables.default_master_connection,
+ MYSQL_ERROR::WARN_LEVEL_NOTE);
+ if (mi)
+ sprintf(buff, "%.3f", mi->heartbeat_period);
else
var->type= SHOW_UNDEF;
mysql_mutex_unlock(&LOCK_active_mi);
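
All three handlers above now share one per-connection lookup pattern; condensed, using only names that appear in this hunk:

    mysql_mutex_lock(&LOCK_active_mi);
    Master_info *mi= master_info_index->
      get_master_info(&thd->variables.default_master_connection,
                      MYSQL_ERROR::WARN_LEVEL_NOTE);
    if (mi)
      ; /* fill *buff from mi for the selected default connection */
    else
      var->type= SHOW_UNDEF;        /* no such connection: hide the variable */
    mysql_mutex_unlock(&LOCK_active_mi);
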
@@ -6963,7 +7056,7 @@ SHOW_VAR status_vars[]= {
{"Bytes_received", (char*) offsetof(STATUS_VAR, bytes_received), SHOW_LONGLONG_STATUS},
{"Bytes_sent", (char*) offsetof(STATUS_VAR, bytes_sent), SHOW_LONGLONG_STATUS},
{"Com", (char*) com_status_vars, SHOW_ARRAY},
- {"Compression", (char*) &show_net_compression, SHOW_FUNC},
+ {"Compression", (char*) &show_net_compression, SHOW_SIMPLE_FUNC},
{"Connections", (char*) &thread_id, SHOW_LONG_NOFLUSH},
{"Cpu_time", (char*) offsetof(STATUS_VAR, cpu_time), SHOW_DOUBLE_STATUS},
{"Created_tmp_disk_tables", (char*) offsetof(STATUS_VAR, created_tmp_disk_tables), SHOW_LONG_STATUS},
@@ -7011,18 +7104,19 @@ SHOW_VAR status_vars[]= {
{"Key", (char*) &show_default_keycache, SHOW_FUNC},
{"Last_query_cost", (char*) offsetof(STATUS_VAR, last_query_cost), SHOW_DOUBLE_STATUS},
{"Max_used_connections", (char*) &max_used_connections, SHOW_LONG},
+ {"Memory_used", (char*) offsetof(STATUS_VAR, memory_used), SHOW_LONGLONG_STATUS},
{"Not_flushed_delayed_rows", (char*) &delayed_rows_in_use, SHOW_LONG_NOFLUSH},
{"Open_files", (char*) &my_file_opened, SHOW_LONG_NOFLUSH},
{"Open_streams", (char*) &my_stream_opened, SHOW_LONG_NOFLUSH},
- {"Open_table_definitions", (char*) &show_table_definitions, SHOW_FUNC},
- {"Open_tables", (char*) &show_open_tables, SHOW_FUNC},
+ {"Open_table_definitions", (char*) &show_table_definitions, SHOW_SIMPLE_FUNC},
+ {"Open_tables", (char*) &show_open_tables, SHOW_SIMPLE_FUNC},
{"Opened_files", (char*) &my_file_total_opened, SHOW_LONG_NOFLUSH},
{"Opened_table_definitions", (char*) offsetof(STATUS_VAR, opened_shares), SHOW_LONG_STATUS},
{"Opened_tables", (char*) offsetof(STATUS_VAR, opened_tables), SHOW_LONG_STATUS},
{"Opened_views", (char*) offsetof(STATUS_VAR, opened_views), SHOW_LONG_STATUS},
- {"Prepared_stmt_count", (char*) &show_prepared_stmt_count, SHOW_FUNC},
- {"Rows_read", (char*) offsetof(STATUS_VAR, rows_read), SHOW_LONGLONG_STATUS},
+ {"Prepared_stmt_count", (char*) &show_prepared_stmt_count, SHOW_SIMPLE_FUNC},
{"Rows_sent", (char*) offsetof(STATUS_VAR, rows_sent), SHOW_LONGLONG_STATUS},
+ {"Rows_read", (char*) offsetof(STATUS_VAR, rows_read), SHOW_LONGLONG_STATUS},
{"Rows_tmp_read", (char*) offsetof(STATUS_VAR, rows_tmp_read), SHOW_LONGLONG_STATUS},
#ifdef HAVE_QUERY_CACHE
{"Qcache_free_blocks", (char*) &query_cache.free_memory_blocks, SHOW_LONG_NOFLUSH},
@@ -7034,10 +7128,10 @@ SHOW_VAR status_vars[]= {
{"Qcache_queries_in_cache", (char*) &query_cache.queries_in_cache, SHOW_LONG_NOFLUSH},
{"Qcache_total_blocks", (char*) &query_cache.total_blocks, SHOW_LONG_NOFLUSH},
#endif /*HAVE_QUERY_CACHE*/
- {"Queries", (char*) &show_queries, SHOW_FUNC},
+ {"Queries", (char*) &show_queries, SHOW_SIMPLE_FUNC},
{"Questions", (char*) offsetof(STATUS_VAR, questions), SHOW_LONG_STATUS},
#ifdef HAVE_REPLICATION
- {"Rpl_status", (char*) &show_rpl_status, SHOW_FUNC},
+ {"Rpl_status", (char*) &show_rpl_status, SHOW_SIMPLE_FUNC},
#endif
{"Select_full_join", (char*) offsetof(STATUS_VAR, select_full_join_count), SHOW_LONG_STATUS},
{"Select_full_range_join", (char*) offsetof(STATUS_VAR, select_full_range_join_count), SHOW_LONG_STATUS},
@@ -7046,10 +7140,10 @@ SHOW_VAR status_vars[]= {
{"Select_scan", (char*) offsetof(STATUS_VAR, select_scan_count), SHOW_LONG_STATUS},
{"Slave_open_temp_tables", (char*) &slave_open_temp_tables, SHOW_LONG},
#ifdef HAVE_REPLICATION
- {"Slave_heartbeat_period", (char*) &show_heartbeat_period, SHOW_FUNC},
- {"Slave_received_heartbeats",(char*) &show_slave_received_heartbeats, SHOW_FUNC},
- {"Slave_retried_transactions",(char*) &show_slave_retried_trans, SHOW_FUNC},
- {"Slave_running", (char*) &show_slave_running, SHOW_FUNC},
+ {"Slave_retried_transactions",(char*)&slave_retried_transactions, SHOW_LONG},
+ {"Slave_heartbeat_period", (char*) &show_heartbeat_period, SHOW_SIMPLE_FUNC},
+ {"Slave_received_heartbeats",(char*) &show_slave_received_heartbeats, SHOW_SIMPLE_FUNC},
+ {"Slave_running", (char*) &show_slave_running, SHOW_SIMPLE_FUNC},
#endif
{"Slow_launch_threads", (char*) &slow_launch_threads, SHOW_LONG},
{"Slow_queries", (char*) offsetof(STATUS_VAR, long_query_count), SHOW_LONG_STATUS},
@@ -7059,29 +7153,29 @@ SHOW_VAR status_vars[]= {
{"Sort_scan", (char*) offsetof(STATUS_VAR, filesort_scan_count), SHOW_LONG_STATUS},
#ifdef HAVE_OPENSSL
#ifndef EMBEDDED_LIBRARY
- {"Ssl_accept_renegotiates", (char*) &show_ssl_ctx_sess_accept_renegotiate, SHOW_FUNC},
- {"Ssl_accepts", (char*) &show_ssl_ctx_sess_accept, SHOW_FUNC},
- {"Ssl_callback_cache_hits", (char*) &show_ssl_ctx_sess_cb_hits, SHOW_FUNC},
- {"Ssl_cipher", (char*) &show_ssl_get_cipher, SHOW_FUNC},
- {"Ssl_cipher_list", (char*) &show_ssl_get_cipher_list, SHOW_FUNC},
- {"Ssl_client_connects", (char*) &show_ssl_ctx_sess_connect, SHOW_FUNC},
- {"Ssl_connect_renegotiates", (char*) &show_ssl_ctx_sess_connect_renegotiate, SHOW_FUNC},
- {"Ssl_ctx_verify_depth", (char*) &show_ssl_ctx_get_verify_depth, SHOW_FUNC},
- {"Ssl_ctx_verify_mode", (char*) &show_ssl_ctx_get_verify_mode, SHOW_FUNC},
- {"Ssl_default_timeout", (char*) &show_ssl_get_default_timeout, SHOW_FUNC},
- {"Ssl_finished_accepts", (char*) &show_ssl_ctx_sess_accept_good, SHOW_FUNC},
- {"Ssl_finished_connects", (char*) &show_ssl_ctx_sess_connect_good, SHOW_FUNC},
- {"Ssl_session_cache_hits", (char*) &show_ssl_ctx_sess_hits, SHOW_FUNC},
- {"Ssl_session_cache_misses", (char*) &show_ssl_ctx_sess_misses, SHOW_FUNC},
- {"Ssl_session_cache_mode", (char*) &show_ssl_ctx_get_session_cache_mode, SHOW_FUNC},
- {"Ssl_session_cache_overflows", (char*) &show_ssl_ctx_sess_cache_full, SHOW_FUNC},
- {"Ssl_session_cache_size", (char*) &show_ssl_ctx_sess_get_cache_size, SHOW_FUNC},
- {"Ssl_session_cache_timeouts", (char*) &show_ssl_ctx_sess_timeouts, SHOW_FUNC},
- {"Ssl_sessions_reused", (char*) &show_ssl_session_reused, SHOW_FUNC},
- {"Ssl_used_session_cache_entries",(char*) &show_ssl_ctx_sess_number, SHOW_FUNC},
- {"Ssl_verify_depth", (char*) &show_ssl_get_verify_depth, SHOW_FUNC},
- {"Ssl_verify_mode", (char*) &show_ssl_get_verify_mode, SHOW_FUNC},
- {"Ssl_version", (char*) &show_ssl_get_version, SHOW_FUNC},
+ {"Ssl_accept_renegotiates", (char*) &show_ssl_ctx_sess_accept_renegotiate, SHOW_SIMPLE_FUNC},
+ {"Ssl_accepts", (char*) &show_ssl_ctx_sess_accept, SHOW_SIMPLE_FUNC},
+ {"Ssl_callback_cache_hits", (char*) &show_ssl_ctx_sess_cb_hits, SHOW_SIMPLE_FUNC},
+ {"Ssl_cipher", (char*) &show_ssl_get_cipher, SHOW_SIMPLE_FUNC},
+ {"Ssl_cipher_list", (char*) &show_ssl_get_cipher_list, SHOW_SIMPLE_FUNC},
+ {"Ssl_client_connects", (char*) &show_ssl_ctx_sess_connect, SHOW_SIMPLE_FUNC},
+ {"Ssl_connect_renegotiates", (char*) &show_ssl_ctx_sess_connect_renegotiate, SHOW_SIMPLE_FUNC},
+ {"Ssl_ctx_verify_depth", (char*) &show_ssl_ctx_get_verify_depth, SHOW_SIMPLE_FUNC},
+ {"Ssl_ctx_verify_mode", (char*) &show_ssl_ctx_get_verify_mode, SHOW_SIMPLE_FUNC},
+ {"Ssl_default_timeout", (char*) &show_ssl_get_default_timeout, SHOW_SIMPLE_FUNC},
+ {"Ssl_finished_accepts", (char*) &show_ssl_ctx_sess_accept_good, SHOW_SIMPLE_FUNC},
+ {"Ssl_finished_connects", (char*) &show_ssl_ctx_sess_connect_good, SHOW_SIMPLE_FUNC},
+ {"Ssl_session_cache_hits", (char*) &show_ssl_ctx_sess_hits, SHOW_SIMPLE_FUNC},
+ {"Ssl_session_cache_misses", (char*) &show_ssl_ctx_sess_misses, SHOW_SIMPLE_FUNC},
+ {"Ssl_session_cache_mode", (char*) &show_ssl_ctx_get_session_cache_mode, SHOW_SIMPLE_FUNC},
+ {"Ssl_session_cache_overflows", (char*) &show_ssl_ctx_sess_cache_full, SHOW_SIMPLE_FUNC},
+ {"Ssl_session_cache_size", (char*) &show_ssl_ctx_sess_get_cache_size, SHOW_SIMPLE_FUNC},
+ {"Ssl_session_cache_timeouts", (char*) &show_ssl_ctx_sess_timeouts, SHOW_SIMPLE_FUNC},
+ {"Ssl_sessions_reused", (char*) &show_ssl_session_reused, SHOW_SIMPLE_FUNC},
+ {"Ssl_used_session_cache_entries",(char*) &show_ssl_ctx_sess_number, SHOW_SIMPLE_FUNC},
+ {"Ssl_verify_depth", (char*) &show_ssl_get_verify_depth, SHOW_SIMPLE_FUNC},
+ {"Ssl_verify_mode", (char*) &show_ssl_get_verify_mode, SHOW_SIMPLE_FUNC},
+ {"Ssl_version", (char*) &show_ssl_get_version, SHOW_SIMPLE_FUNC},
#endif
#endif /* HAVE_OPENSSL */
{"Syncs", (char*) &my_sync_count, SHOW_LONG_NOFLUSH},
@@ -7099,16 +7193,16 @@ SHOW_VAR status_vars[]= {
{"Tc_log_page_waits", (char*) &tc_log_page_waits, SHOW_LONG},
#endif
#ifdef HAVE_POOL_OF_THREADS
- {"Threadpool_idle_threads", (char *) &show_threadpool_idle_threads, SHOW_FUNC},
+ {"Threadpool_idle_threads", (char *) &show_threadpool_idle_threads, SHOW_SIMPLE_FUNC},
{"Threadpool_threads", (char *) &tp_stats.num_worker_threads, SHOW_INT},
#endif
{"Threads_cached", (char*) &cached_thread_count, SHOW_LONG_NOFLUSH},
{"Threads_connected", (char*) &connection_count, SHOW_INT},
{"Threads_created", (char*) &thread_created, SHOW_LONG_NOFLUSH},
{"Threads_running", (char*) &thread_running, SHOW_INT},
- {"Uptime", (char*) &show_starttime, SHOW_FUNC},
+ {"Uptime", (char*) &show_starttime, SHOW_SIMPLE_FUNC},
#ifdef ENABLED_PROFILING
- {"Uptime_since_flush_status",(char*) &show_flushstatustime, SHOW_FUNC},
+ {"Uptime_since_flush_status",(char*) &show_flushstatustime, SHOW_SIMPLE_FUNC},
#endif
{NullS, NullS, SHOW_LONG}
};
@@ -7157,7 +7251,7 @@ static int option_cmp(my_option *a, my_option *b)
static void print_help()
{
MEM_ROOT mem_root;
- init_alloc_root(&mem_root, 4096, 4096);
+ init_alloc_root(&mem_root, 4096, 4096, MYF(0));
pop_dynamic(&all_options);
sys_var_add_options(&all_options, sys_var::PARSE_EARLY);
@@ -7301,10 +7395,12 @@ static int mysql_init_variables(void)
protocol_version= PROTOCOL_VERSION;
what_to_log= ~ (1L << (uint) COM_TIME);
refresh_version= 1L; /* Increments on each reload */
+ denied_connections= 0;
executed_events= 0;
global_query_id= thread_id= 1L;
my_atomic_rwlock_init(&global_query_id_lock);
my_atomic_rwlock_init(&thread_running_lock);
+ my_atomic_rwlock_init(&statistics_lock);
strmov(server_version, MYSQL_SERVER_VERSION);
threads.empty();
thread_cache.empty();
@@ -7328,6 +7424,7 @@ static int mysql_init_variables(void)
relay_log_info_file= (char*) "relay-log.info";
report_user= report_password = report_host= 0; /* TO BE DELETED */
opt_relay_logname= opt_relaylog_index_name= 0;
+ slave_retried_transactions= 0;
/* Variables in libraries */
charsets_dir= 0;
@@ -7352,8 +7449,13 @@ static int mysql_init_variables(void)
#if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY)
have_ssl=SHOW_OPTION_YES;
+#if HAVE_YASSL
+ have_openssl= SHOW_OPTION_NO;
+#else
+ have_openssl= SHOW_OPTION_YES;
+#endif
#else
- have_ssl=SHOW_OPTION_NO;
+ have_openssl= have_ssl= SHOW_OPTION_NO;
#endif
#ifdef HAVE_BROKEN_REALPATH
have_symlink=SHOW_OPTION_NO;
@@ -7471,7 +7573,7 @@ mysqld_get_one_option(int optid,
default_collation_name= 0;
break;
case 'l':
- WARN_DEPRECATED(NULL, 7, 0, "--log", "'--general-log'/'--general-log-file'");
+ WARN_DEPRECATED(NULL, 10, 1, "--log", "'--general-log'/'--general-log-file'");
opt_log=1;
break;
case 'h':
@@ -7636,7 +7738,7 @@ mysqld_get_one_option(int optid,
}
#endif /* HAVE_REPLICATION */
case (int) OPT_SLOW_QUERY_LOG:
- WARN_DEPRECATED(NULL, 7, 0, "--log-slow-queries", "'--slow-query-log'/'--slow-query-log-file'");
+ WARN_DEPRECATED(NULL, 10, 1, "--log-slow-queries", "'--slow-query-log'/'--slow-query-log-file'");
opt_slow_log= 1;
break;
case (int) OPT_SAFE:
@@ -7653,7 +7755,7 @@ mysqld_get_one_option(int optid,
case (int) OPT_SKIP_PRIOR:
opt_specialflag|= SPECIAL_NO_PRIOR;
sql_print_warning("The --skip-thread-priority startup option is deprecated "
- "and will be removed in MySQL 7.0. This option has no effect "
+ "and will be removed in MySQL 11.0. This option has no effect "
"as the implied behavior is already the default.");
break;
case (int) OPT_SKIP_HOST_CACHE:
@@ -7841,7 +7943,7 @@ static int get_options(int *argc_ptr, char ***argv_ptr)
/* prepare all_options array */
my_init_dynamic_array(&all_options, sizeof(my_option),
array_elements(my_long_options),
- array_elements(my_long_options)/4);
+ array_elements(my_long_options)/4, MYF(0));
for (my_option *opt= my_long_options;
opt < my_long_options + array_elements(my_long_options) - 1;
opt++)
@@ -8036,8 +8138,30 @@ static int get_options(int *argc_ptr, char ***argv_ptr)
if (!max_long_data_size_used)
max_long_data_size= global_system_variables.max_allowed_packet;
- /* Rember if max_user_connections was 0 at startup */
+ /* Remember if max_user_connections was 0 at startup */
max_user_connections_checking= global_system_variables.max_user_connections != 0;
+
+ {
+ sys_var *max_relay_log_size_var, *max_binlog_size_var;
+ /* If max_relay_log_size is 0, then set it to max_binlog_size */
+ if (!global_system_variables.max_relay_log_size)
+ global_system_variables.max_relay_log_size= max_binlog_size;
+
+ /*
+ Fix so that DEFAULT and limit checking works with max_relay_log_size
+ (Yes, this is a hack, but it's required as the definition of
+ max_relay_log_size allows it to be set to 0).
+ */
+ max_relay_log_size_var= intern_find_sys_var("max_relay_log_size", 0);
+ max_binlog_size_var= intern_find_sys_var("max_binlog_size", 0);
+ if (max_binlog_size_var && max_relay_log_size_var)
+ {
+ max_relay_log_size_var->option.min_value=
+ max_binlog_size_var->option.min_value;
+ max_relay_log_size_var->option.def_value=
+ max_binlog_size_var->option.def_value;
+ }
+ }
return 0;
}
@@ -8321,7 +8445,7 @@ void refresh_status(THD *thd)
add_to_status(&global_status_var, &thd->status_var);
/* Reset thread's status variables */
- bzero((uchar*) &thd->status_var, sizeof(thd->status_var));
+ thd->set_status_var_init();
bzero((uchar*) &thd->org_status_var, sizeof(thd->org_status_var));
thd->start_bytes_received= 0;
@@ -8344,23 +8468,3 @@ void refresh_status(THD *thd)
mysql_mutex_unlock(&LOCK_thread_count);
}
-
-/*****************************************************************************
- Instantiate variables for missing storage engines
- This section should go away soon
-*****************************************************************************/
-
-/*****************************************************************************
- Instantiate templates
-*****************************************************************************/
-
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-/* Used templates */
-template class I_List<THD>;
-template class I_List_iterator<THD>;
-template class I_List<i_string>;
-template class I_List<i_string_pair>;
-template class I_List<Statement>;
-template class I_List_iterator<Statement>;
-#endif
-
diff --git a/sql/mysqld.h b/sql/mysqld.h
index 293c20ade55..6bde25f08fb 100644
--- a/sql/mysqld.h
+++ b/sql/mysqld.h
@@ -95,6 +95,7 @@ extern my_bool opt_safe_user_create;
extern my_bool opt_safe_show_db, opt_local_infile, opt_myisam_use_mmap;
extern my_bool opt_slave_compressed_protocol, use_temp_pool;
extern ulong slave_exec_mode_options;
+extern ulong slave_retried_transactions;
extern ulonglong slave_type_conversions_options;
extern my_bool read_only, opt_readonly;
extern my_bool lower_case_file_system;
@@ -105,6 +106,7 @@ extern char* opt_secure_backup_file_priv;
extern size_t opt_secure_backup_file_priv_len;
extern my_bool opt_log_slow_admin_statements, opt_log_slow_slave_statements;
extern my_bool sp_automatic_privileges, opt_noacl;
+extern ulong use_stat_tables;
extern my_bool opt_old_style_user_limits, trust_function_creators;
extern uint opt_crash_binlog_innodb;
extern char *shared_memory_base_name, *mysqld_unix_port;
@@ -169,7 +171,7 @@ extern ulong max_prepared_stmt_count, prepared_stmt_count;
extern ulong open_files_limit;
extern ulonglong binlog_cache_size, binlog_stmt_cache_size;
extern ulonglong max_binlog_cache_size, max_binlog_stmt_cache_size;
-extern ulong max_binlog_size, max_relay_log_size;
+extern ulong max_binlog_size;
extern ulong slave_max_allowed_packet;
extern ulong opt_binlog_rows_event_max_size;
extern ulong rpl_recovery_rank, thread_cache_size;
@@ -198,7 +200,7 @@ extern handlerton *myisam_hton;
extern handlerton *heap_hton;
extern const char *load_default_groups[];
extern struct my_option my_long_options[];
-extern int mysqld_server_started;
+extern int mysqld_server_started, mysqld_server_initialized;
extern "C" MYSQL_PLUGIN_IMPORT int orig_argc;
extern "C" MYSQL_PLUGIN_IMPORT char **orig_argv;
extern pthread_attr_t connection_attrib;
@@ -219,14 +221,15 @@ extern pthread_key(MEM_ROOT**,THR_MALLOC);
#ifdef HAVE_PSI_INTERFACE
#ifdef HAVE_MMAP
extern PSI_mutex_key key_PAGE_lock, key_LOCK_sync, key_LOCK_active,
- key_LOCK_pool;
+ key_LOCK_pool, key_LOCK_pending_checkpoint;
#endif /* HAVE_MMAP */
#ifdef HAVE_OPENSSL
extern PSI_mutex_key key_LOCK_des_key_file;
#endif
-extern PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_prep_xids,
+extern PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list,
+ key_BINLOG_LOCK_binlog_background_thread,
key_delayed_insert_mutex, key_hash_filo_lock, key_LOCK_active_mi,
key_LOCK_connection_count, key_LOCK_crypt, key_LOCK_delayed_create,
key_LOCK_delayed_insert, key_LOCK_delayed_status, key_LOCK_error_log,
@@ -257,7 +260,9 @@ extern PSI_rwlock_key key_rwlock_LOCK_grant, key_rwlock_LOCK_logger,
extern PSI_cond_key key_PAGE_cond, key_COND_active, key_COND_pool;
#endif /* HAVE_MMAP */
-extern PSI_cond_key key_BINLOG_COND_prep_xids, key_BINLOG_update_cond,
+extern PSI_cond_key key_BINLOG_COND_xid_list, key_BINLOG_update_cond,
+ key_BINLOG_COND_binlog_background_thread,
+ key_BINLOG_COND_binlog_background_thread_end,
key_COND_cache_status_changed, key_COND_manager,
key_COND_rpl_status, key_COND_server_started,
key_delayed_insert_cond, key_delayed_insert_cond_client,
@@ -433,6 +438,7 @@ enum enum_query_type
typedef int64 query_id_t;
extern query_id_t global_query_id;
extern my_atomic_rwlock_t global_query_id_lock;
+extern my_atomic_rwlock_t statistics_lock;
void unireg_end(void) __attribute__((noreturn));
@@ -528,6 +534,10 @@ inline THD *_current_thd(void)
}
#endif
#define current_thd _current_thd()
+inline int set_current_thd(THD *thd)
+{
+ return my_pthread_setspecific_ptr(THR_THD, thd);
+}
/*
@todo remove, make it static in ha_maria.cc
diff --git a/sql/net_serv.cc b/sql/net_serv.cc
index 08ac93f3091..cf3e962053c 100644
--- a/sql/net_serv.cc
+++ b/sql/net_serv.cc
@@ -120,14 +120,15 @@ static my_bool net_write_buff(NET *net,const uchar *packet,ulong len);
/** Init with packet info. */
-my_bool my_net_init(NET *net, Vio* vio)
+my_bool my_net_init(NET *net, Vio* vio, uint my_flags)
{
DBUG_ENTER("my_net_init");
+ DBUG_PRINT("enter", ("my_flags: %u", my_flags));
net->vio = vio;
my_net_local_init(net); /* Set some limits */
if (!(net->buff=(uchar*) my_malloc((size_t) net->max_packet+
NET_HEADER_SIZE + COMP_HEADER_SIZE +1,
- MYF(MY_WME))))
+ MYF(MY_WME | my_flags))))
DBUG_RETURN(1);
net->buff_end=net->buff+net->max_packet;
net->error=0; net->return_status=0;
@@ -139,6 +140,7 @@ my_bool my_net_init(NET *net, Vio* vio)
net->net_skip_rest_factor= 0;
net->last_errno=0;
net->unused= 0;
+ net->thread_specific_malloc= test(my_flags & MY_THREAD_SPECIFIC);
if (vio != 0) /* If real connection */
{
@@ -193,7 +195,9 @@ my_bool net_realloc(NET *net, size_t length)
*/
if (!(buff= (uchar*) my_realloc((char*) net->buff, pkt_length +
NET_HEADER_SIZE + COMP_HEADER_SIZE + 1,
- MYF(MY_WME))))
+ MYF(MY_WME |
+ (net->thread_specific_malloc ?
+ MY_THREAD_SPECIFIC : 0)))))
{
/* @todo: 1 and 2 codes are identical. */
net->error= 1;
@@ -603,7 +607,10 @@ net_real_write(NET *net,const uchar *packet, size_t len)
uchar *b;
uint header_length=NET_HEADER_SIZE+COMP_HEADER_SIZE;
if (!(b= (uchar*) my_malloc(len + NET_HEADER_SIZE +
- COMP_HEADER_SIZE + 1, MYF(MY_WME))))
+ COMP_HEADER_SIZE + 1,
+ MYF(MY_WME |
+ (net->thread_specific_malloc ?
+ MY_THREAD_SPECIFIC : 0)))))
{
net->error= 2;
net->last_errno= ER_OUT_OF_RESOURCES;
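
The same conditional-flag expression recurs at each allocation site in this file; a small helper of this shape (hypothetical, not part of the patch) would capture the intent in one place:

    static inline myf net_malloc_flags(const NET *net)
    {
      return MYF(MY_WME |
                 (net->thread_specific_malloc ? MY_THREAD_SPECIFIC : 0));
    }
    /* e.g.: my_malloc(len + NET_HEADER_SIZE + COMP_HEADER_SIZE + 1,
                       net_malloc_flags(net)) */
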
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index cb25ca7760e..7a3a0c94a18 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -1773,7 +1773,8 @@ QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr,
index= key_nr;
head= table;
key_part_info= head->key_info[index].key_part;
- my_init_dynamic_array(&ranges, sizeof(QUICK_RANGE*), 16, 16);
+ my_init_dynamic_array(&ranges, sizeof(QUICK_RANGE*), 16, 16,
+ MYF(MY_THREAD_SPECIFIC));
/* 'thd' is not accessible in QUICK_RANGE_SELECT::reset(). */
mrr_buf_size= thd->variables.mrr_buff_size;
@@ -1782,7 +1783,8 @@ QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr,
if (!no_alloc && !parent_alloc)
{
// Allocates everything through the internal memroot
- init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
+ init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0,
+ MYF(MY_THREAD_SPECIFIC));
thd->mem_root= &alloc;
}
else
@@ -1792,7 +1794,7 @@ QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr,
/* Allocate a bitmap for used columns (Q: why not on MEM_ROOT?) */
if (!(bitmap= (my_bitmap_map*) my_malloc(head->s->column_bitmap_size,
- MYF(MY_WME))))
+ MYF(MY_WME | MY_THREAD_SPECIFIC))))
{
column_bitmap.bitmap= 0;
*create_error= 1;
@@ -1877,7 +1879,8 @@ QUICK_INDEX_SORT_SELECT::QUICK_INDEX_SORT_SELECT(THD *thd_param,
index= MAX_KEY;
head= table;
bzero(&read_record, sizeof(read_record));
- init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
+ init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0,
+ MYF(MY_THREAD_SPECIFIC));
DBUG_VOID_RETURN;
}
@@ -1948,7 +1951,8 @@ QUICK_ROR_INTERSECT_SELECT::QUICK_ROR_INTERSECT_SELECT(THD *thd_param,
head= table;
record= head->record[0];
if (!parent_alloc)
- init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
+ init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0,
+ MYF(MY_THREAD_SPECIFIC));
else
bzero(&alloc, sizeof(MEM_ROOT));
last_rowid= (uchar*) alloc_root(parent_alloc? parent_alloc : &alloc,
@@ -2247,7 +2251,8 @@ QUICK_ROR_UNION_SELECT::QUICK_ROR_UNION_SELECT(THD *thd_param,
head= table;
rowid_length= table->file->ref_length;
record= head->record[0];
- init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
+ init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0,
+ MYF(MY_THREAD_SPECIFIC));
thd_param->mem_root= &alloc;
}
@@ -2929,7 +2934,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
DBUG_PRINT("enter",("keys_to_use: %lu prev_tables: %lu const_tables: %lu",
(ulong) keys_to_use.to_ulonglong(), (ulong) prev_tables,
(ulong) const_tables));
- DBUG_PRINT("info", ("records: %lu", (ulong) head->file->stats.records));
+ DBUG_PRINT("info", ("records: %lu", (ulong) head->stat_records()));
delete quick;
quick=0;
needed_reg.clear_all();
@@ -2937,7 +2942,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
DBUG_ASSERT(!head->is_filled_at_execution());
if (keys_to_use.is_clear_all() || head->is_filled_at_execution())
DBUG_RETURN(0);
- records= head->file->stats.records;
+ records= head->stat_records();
if (!records)
records++; /* purecov: inspected */
scan_time= (double) records / TIME_FOR_COMPARE + 1;
@@ -2981,7 +2986,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
param.force_default_mrr= ordered_output;
thd->no_errors=1; // Don't warn about NULL
- init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
+ init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0,
+ MYF(MY_THREAD_SPECIFIC));
if (!(param.key_parts=
(KEY_PART*) alloc_root(&alloc,
sizeof(KEY_PART) *
@@ -3074,7 +3080,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
if (group_trp)
{
param.table->quick_condition_rows= min(group_trp->records,
- head->file->stats.records);
+ head->stat_records());
if (group_trp->read_cost < best_read_time)
{
best_trp= group_trp;
@@ -3426,7 +3432,8 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
my_bitmap_map *old_sets[2];
prune_param.part_info= part_info;
- init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
+ init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0,
+ MYF(MY_THREAD_SPECIFIC));
range_par->mem_root= &alloc;
range_par->old_root= thd->mem_root;
@@ -4680,7 +4687,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
DBUG_PRINT("info", ("index_merge scans cost %g", imerge_cost));
if (imerge_too_expensive || (imerge_cost > read_time) ||
((non_cpk_scan_records+cpk_scan_records >=
- param->table->file->stats.records) &&
+ param->table->stat_records()) &&
read_time != DBL_MAX))
{
/*
@@ -4751,7 +4758,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
imerge_trp->read_cost= imerge_cost;
imerge_trp->records= non_cpk_scan_records + cpk_scan_records;
imerge_trp->records= min(imerge_trp->records,
- param->table->file->stats.records);
+ param->table->stat_records());
imerge_trp->range_scans= range_scans;
imerge_trp->range_scans_end= range_scans + n_child_scans;
read_time= imerge_cost;
@@ -4822,7 +4829,7 @@ skip_to_ror_scan:
((TRP_ROR_INTERSECT*)(*cur_roru_plan))->index_scan_costs;
roru_total_records += (*cur_roru_plan)->records;
roru_intersect_part *= (*cur_roru_plan)->records /
- param->table->file->stats.records;
+ param->table->stat_records();
}
/*
@@ -4832,7 +4839,7 @@ skip_to_ror_scan:
in disjunction do not share key parts.
*/
roru_total_records -= (ha_rows)(roru_intersect_part*
- param->table->file->stats.records);
+ param->table->stat_records());
/* ok, got a ROR read plan for each of the disjuncts
Calculate cost:
cost(index_union_scan(scan_1, ... scan_n)) =
@@ -5109,12 +5116,12 @@ static inline
ha_rows get_table_cardinality_for_index_intersect(TABLE *table)
{
if (table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT)
- return table->file->stats.records;
+ return table->stat_records();
else
{
ha_rows d;
double q;
- for (q= (double)table->file->stats.records, d= 1 ; q >= 10; q/= 10, d*= 10 ) ;
+ for (q= (double)table->stat_records(), d= 1 ; q >= 10; q/= 10, d*= 10 ) ;
return (ha_rows) (floor(q+0.5) * d);
}
}
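
A worked example of the rounding above, with illustrative numbers: for stat_records() == 38654 the loop exits with q= 3.8654 and d= 10000, so the function returns floor(3.8654 + 0.5) * 10000 = 40000, i.e. the cardinality rounded to one significant digit.
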
@@ -5517,9 +5524,8 @@ ha_rows records_in_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
ha_rows ext_records= ext_index_scan->records;
if (i < used_key_parts)
{
- ulong *rec_per_key= key_info->rec_per_key+i-1;
- ulong f1= rec_per_key[0] ? rec_per_key[0] : 1;
- ulong f2= rec_per_key[1] ? rec_per_key[1] : 1;
+ ulong f1= key_info->actual_rec_per_key(i-1);
+ ulong f2= key_info->actual_rec_per_key(i);
ext_records= (ha_rows) ((double) ext_records / f2 * f1);
}
if (ext_records < table_cardinality)
@@ -6011,7 +6017,7 @@ ROR_INTERSECT_INFO* ror_intersect_init(const PARAM *param)
info->is_covering= FALSE;
info->index_scan_costs= 0.0;
info->index_records= 0;
- info->out_rows= (double) param->table->file->stats.records;
+ info->out_rows= (double) param->table->stat_records();
bitmap_clear_all(&info->covered_fields);
return info;
}
@@ -6137,7 +6143,7 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info,
min_range.flag= HA_READ_KEY_EXACT;
max_range.key= key_val;
max_range.flag= HA_READ_AFTER_KEY;
- ha_rows prev_records= info->param->table->file->stats.records;
+ ha_rows prev_records= info->param->table->stat_records();
DBUG_ENTER("ror_scan_selectivity");
for (sel_arg= scan->sel_arg; sel_arg;
@@ -6364,7 +6370,7 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
double min_cost= DBL_MAX;
DBUG_ENTER("get_best_ror_intersect");
- if ((tree->n_ror_scans < 2) || !param->table->file->stats.records ||
+ if ((tree->n_ror_scans < 2) || !param->table->stat_records() ||
!optimizer_flag(param->thd, OPTIMIZER_SWITCH_INDEX_MERGE_INTERSECT))
DBUG_RETURN(NULL);
@@ -12701,14 +12707,14 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
double cpu_cost= 0; /* TODO: CPU cost of index_read calls? */
DBUG_ENTER("cost_group_min_max");
- table_records= table->file->stats.records;
+ table_records= table->stat_records();
keys_per_block= (table->file->stats.block_size / 2 /
(index_info->key_length + table->file->ref_length)
+ 1);
num_blocks= (uint)(table_records / keys_per_block) + 1;
/* Compute the number of keys in a group. */
- keys_per_group= index_info->rec_per_key[group_key_parts - 1];
+ keys_per_group= index_info->actual_rec_per_key(group_key_parts - 1);
if (keys_per_group == 0) /* If there is no statistics try to guess */
/* each group contains 10% of all records */
keys_per_group= (uint)(table_records / 10) + 1;
@@ -12728,7 +12734,7 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
Compute the probability that two ends of a subgroup are inside
different blocks.
*/
- keys_per_subgroup= index_info->rec_per_key[used_key_parts - 1];
+ keys_per_subgroup= index_info->actual_rec_per_key(used_key_parts - 1);
if (keys_per_subgroup >= keys_per_block) /* If a subgroup is bigger than */
p_overlap= 1.0; /* a block, it will overlap at least two blocks. */
else
@@ -12925,7 +12931,8 @@ QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join_arg, bool have_min_arg,
DBUG_ASSERT(!parent_alloc);
if (!parent_alloc)
{
- init_sql_alloc(&alloc, join->thd->variables.range_alloc_block_size, 0);
+ init_sql_alloc(&alloc, join->thd->variables.range_alloc_block_size, 0,
+ MYF(MY_THREAD_SPECIFIC));
join->thd->mem_root= &alloc;
}
else
@@ -12980,7 +12987,8 @@ int QUICK_GROUP_MIN_MAX_SELECT::init()
if (min_max_arg_part)
{
- if (my_init_dynamic_array(&min_max_ranges, sizeof(QUICK_RANGE*), 16, 16))
+ if (my_init_dynamic_array(&min_max_ranges, sizeof(QUICK_RANGE*), 16, 16,
+ MYF(MY_THREAD_SPECIFIC)))
return 1;
if (have_min)
@@ -14184,11 +14192,3 @@ void QUICK_GROUP_MIN_MAX_SELECT::dbug_dump(int indent, bool verbose)
#endif /* !DBUG_OFF */
-/*****************************************************************************
-** Instantiate templates
-*****************************************************************************/
-
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-template class List<QUICK_RANGE>;
-template class List_iterator<QUICK_RANGE>;
-#endif
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index 753722fac08..e149de6fe53 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -1656,6 +1656,7 @@ static bool convert_subq_to_sj(JOIN *parent_join, Item_in_subselect *subq_pred)
parent_lex->ftfunc_list->push_front(ifm);
}
+ parent_lex->have_merged_subqueries= TRUE;
DBUG_RETURN(FALSE);
}
@@ -1766,6 +1767,8 @@ static bool convert_subq_to_jtbm(JOIN *parent_join,
create_subquery_temptable_name(tbl_alias, hash_sj_engine->materialize_join->
select_lex->select_number);
jtbm->alias= tbl_alias;
+
+ parent_lex->have_merged_subqueries= TRUE;
#if 0
/* Inject sj_on_expr into the parent's WHERE or ON */
if (emb_tbl_nest)
@@ -3869,7 +3872,7 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
using_unique_constraint= TRUE;
/* STEP 3: Allocate memory for temptable description */
- init_sql_alloc(&own_root, TABLE_ALLOC_BLOCK_SIZE, 0);
+ init_sql_alloc(&own_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(MY_THREAD_SPECIFIC));
if (!multi_alloc_root(&own_root,
&table, sizeof(*table),
&share, sizeof(*share),
diff --git a/sql/protocol.cc b/sql/protocol.cc
index 3af7dc88b88..f6e9e9e62e1 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -776,7 +776,7 @@ bool Protocol::send_result_set_metadata(List<Item> *list, uint flags)
pos= (char*) local_packet->ptr()+local_packet->length();
*pos++= 12; // Length of packed fields
/* inject a NULL to test the client */
- DBUG_EXECUTE_IF("poison_rs_fields", pos[-1]= 0xfb;);
+ DBUG_EXECUTE_IF("poison_rs_fields", pos[-1]= (char) 0xfb;);
if (item->charset_for_protocol() == &my_charset_bin || thd_charset == NULL)
{
/* No conversion */
diff --git a/sql/protocol.h b/sql/protocol.h
index d9bc48ce45d..1c0a28560bd 100644
--- a/sql/protocol.h
+++ b/sql/protocol.h
@@ -35,6 +35,7 @@ class Protocol
protected:
THD *thd;
String *packet;
+ /* Used by net_store_data() for charset conversions */
String *convert;
uint field_pos;
#ifndef DBUG_OFF
@@ -49,6 +50,10 @@ protected:
MYSQL_FIELD *next_mysql_field;
MEM_ROOT *alloc;
#endif
+ /*
+ The following two are low-level functions that are invoked from
+ higher-level store_xxx() funcs. The data is stored into this->packet.
+ */
bool net_store_data(const uchar *from, size_t length,
CHARSET_INFO *fromcs, CHARSET_INFO *tocs);
bool store_string_aux(const char *from, size_t length,
diff --git a/sql/records.cc b/sql/records.cc
index 7b2cea2830b..d28799ea9d2 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -594,7 +594,7 @@ static int init_rr_cache(THD *thd, READ_RECORD *info)
if (info->cache_records <= 2 ||
!(info->cache=(uchar*) my_malloc_lock(rec_cache_size+info->cache_records*
info->struct_length+1,
- MYF(0))))
+ MYF(MY_THREAD_SPECIFIC))))
DBUG_RETURN(1);
#ifdef HAVE_valgrind
// Avoid warnings in qsort
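A recurring change in this patch is the extra MYF(MY_THREAD_SPECIFIC) flag on allocation calls, which (as far as these hunks show) marks the memory as belonging to the current connection thread so it can be accounted per thread. A toy standalone model of flag-driven per-thread accounting; the constant and counter below are my own stand-ins, not the server's implementation:

#include <cstdio>
#include <cstdlib>

typedef unsigned long myf;                      /* flag word, as in MySQL */
static const myf TOY_THREAD_SPECIFIC= 0x100;    /* invented stand-in flag */

static thread_local size_t thread_allocated= 0; /* per-thread byte counter */

static void *toy_malloc(size_t size, myf flags)
{
  if (flags & TOY_THREAD_SPECIFIC)
    thread_allocated+= size;  /* charge the allocation to this thread */
  return malloc(size);
}

int main()
{
  void *p= toy_malloc(1024, TOY_THREAD_SPECIFIC);
  printf("thread-specific bytes: %zu\n", thread_allocated); /* 1024 */
  free(p);
  return 0;
}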
diff --git a/sql/rpl_filter.cc b/sql/rpl_filter.cc
index 0380bc323a3..f2bd036896d 100644
--- a/sql/rpl_filter.cc
+++ b/sql/rpl_filter.cc
@@ -573,7 +573,7 @@ void
Rpl_filter::init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited)
{
my_init_dynamic_array(a, sizeof(TABLE_RULE_ENT*), TABLE_RULE_ARR_SIZE,
- TABLE_RULE_ARR_SIZE);
+ TABLE_RULE_ARR_SIZE, MYF(0));
*a_inited = 1;
}
diff --git a/sql/rpl_handler.cc b/sql/rpl_handler.cc
index 9267190605c..258dae0edb2 100644
--- a/sql/rpl_handler.cc
+++ b/sql/rpl_handler.cc
@@ -189,7 +189,7 @@ void delegates_destroy()
DYNAMIC_ARRAY *plugins= &s.plugins; \
plugin_ref *plugins_buffer= s.plugins_buffer; \
my_init_dynamic_array2(plugins, sizeof(plugin_ref), \
- plugins_buffer, 8, 8); \
+ plugins_buffer, 8, 8, MYF(0)); \
read_lock(); \
Observer_info_iterator iter= observer_info_iter(); \
Observer_info *info= iter++; \
diff --git a/sql/rpl_handler.h b/sql/rpl_handler.h
index 4743fffb9a0..e028fb49808 100644
--- a/sql/rpl_handler.h
+++ b/sql/rpl_handler.h
@@ -124,7 +124,7 @@ public:
inited= FALSE;
if (my_rwlock_init(&lock, NULL))
return;
- init_sql_alloc(&memroot, 1024, 0);
+ init_sql_alloc(&memroot, 1024, 0, MYF(0));
inited= TRUE;
}
~Delegate()
diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc
index 3c5a99121fa..5348a94b35b 100644
--- a/sql/rpl_mi.cc
+++ b/sql/rpl_mi.cc
@@ -20,6 +20,8 @@
#include "unireg.h" // REQUIRED by other includes
#include "rpl_mi.h"
#include "slave.h" // SLAVE_MAX_HEARTBEAT_PERIOD
+#include "strfunc.h"
+#include "sql_repl.h"
#ifdef HAVE_REPLICATION
@@ -27,7 +29,8 @@
static void init_master_log_pos(Master_info* mi);
-Master_info::Master_info(bool is_slave_recovery)
+Master_info::Master_info(LEX_STRING *connection_name_arg,
+ bool is_slave_recovery)
:Slave_reporting_capability("I/O"),
ssl(0), ssl_verify_server_cert(1), fd(-1), io_thd(0),
rli(is_slave_recovery), port(MYSQL_PORT),
@@ -40,7 +43,25 @@ Master_info::Master_info(bool is_slave_recovery)
ssl_ca[0]= 0; ssl_capath[0]= 0; ssl_cert[0]= 0;
ssl_cipher[0]= 0; ssl_key[0]= 0;
- my_init_dynamic_array(&ignore_server_ids, sizeof(::server_id), 16, 16);
+ /*
+ Store the connection name and its lower-case form.
+ It's safe to ignore any OOM (out-of-memory) errors here, as they
+ are checked later by error()
+ */
+ connection_name.length= cmp_connection_name.length=
+ connection_name_arg->length;
+ if ((connection_name.str= (char*) my_malloc(connection_name_arg->length*2+2,
+ MYF(MY_WME))))
+ {
+ cmp_connection_name.str= (connection_name.str +
+ connection_name_arg->length+1);
+ strmake(connection_name.str, connection_name_arg->str,
+ connection_name.length);
+ memcpy(cmp_connection_name.str, connection_name_arg->str,
+ connection_name.length+1);
+ my_casedn_str(system_charset_info, cmp_connection_name.str);
+ }
+
+ my_init_dynamic_array(&ignore_server_ids, sizeof(::server_id), 16, 16, MYF(0));
bzero((char*) &file, sizeof(file));
mysql_mutex_init(key_master_info_run_lock, &run_lock, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_master_info_data_lock, &data_lock, MY_MUTEX_INIT_FAST);
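The constructor above packs the connection name and its lower-cased copy (the future hash key) into one malloc'ed block, laid out as [name]\0[lower-cased name]\0. A standalone model of that layout:

#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <cstring>

int main()
{
  const char *arg= "Slave1";
  size_t len= strlen(arg);

  char *name= (char*) malloc(len * 2 + 2);  /* both copies + two NULs */
  char *cmp_name= name + len + 1;           /* second copy follows the first */
  memcpy(name, arg, len + 1);
  memcpy(cmp_name, arg, len + 1);
  for (char *p= cmp_name; *p; p++)
    *p= (char) tolower((unsigned char) *p); /* lookups compare this copy */

  printf("'%s' / '%s'\n", name, cmp_name);  /* 'Slave1' / 'slave1' */
  free(name);
  return 0;
}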
@@ -55,6 +76,7 @@ Master_info::Master_info(bool is_slave_recovery)
Master_info::~Master_info()
{
+ my_free(connection_name.str);
delete_dynamic(&ignore_server_ids);
mysql_mutex_destroy(&run_lock);
mysql_mutex_destroy(&data_lock);
@@ -407,7 +429,7 @@ file '%s')", fname);
mi->master_log_name,
(ulong) mi->master_log_pos));
- mi->rli.mi = mi;
+ mi->rli.mi= mi;
if (init_relay_log_info(&mi->rli, slave_info_fname))
goto err;
@@ -560,5 +582,595 @@ void end_master_info(Master_info* mi)
DBUG_VOID_RETURN;
}
+/* Multi-Master By P.Linux */
+uchar *get_key_master_info(Master_info *mi, size_t *length,
+ my_bool not_used __attribute__((unused)))
+{
+ /* Return lower case name */
+ *length= mi->cmp_connection_name.length;
+ return (uchar*) mi->cmp_connection_name.str;
+}
+
+void free_key_master_info(Master_info *mi)
+{
+ DBUG_ENTER("free_key_master_info");
+ terminate_slave_threads(mi,SLAVE_FORCE_ALL);
+ end_master_info(mi);
+ delete mi;
+ DBUG_VOID_RETURN;
+}
+
+/**
+ Check if connection name for master_info is valid.
+
+ It's valid if it's a valid system name of length less than
+ MAX_CONNECTION_NAME.
+
+ @return
+ 0 ok
+ 1 error
+*/
+
+bool check_master_connection_name(LEX_STRING *name)
+{
+ if (name->length >= MAX_CONNECTION_NAME)
+ return 1;
+ return 0;
+}
+
+
+/**
+ Create a log file with a given suffix.
+
+ @param
+ res_file_name Store result here
+ length Length of res_file_name buffer
+ info_file Original file name (prefix)
+ append 1 if we should add suffix last (not before ext)
+ suffix Suffix
+
+ @note
+ The suffix is added before the extension of the file name, prefixed
+ with '-'. The suffix is also converted to lower case and all unsafe
+ characters are transformed, as we do with MySQL table names.
+
+ If suffix is an empty string, then we don't add any suffix.
+ This allows one to use this function also to generate old
+ file names without a suffix.
+*/
+
+void create_logfile_name_with_suffix(char *res_file_name, uint length,
+ const char *info_file, bool append,
+ LEX_STRING *suffix)
+{
+ char buff[MAX_CONNECTION_NAME+1], res[MAX_CONNECTION_NAME+1], *p;
+
+ p= strmake(res_file_name, info_file, length);
+ /* If not empty suffix and there is place left for some part of the suffix */
+ if (suffix->length != 0 && p <= res_file_name + length -1)
+ {
+ const char *info_file_end= info_file + (p - res_file_name);
+ const char *ext= append ? info_file_end : fn_ext2(info_file);
+ size_t res_length, ext_pos;
+ uint errors;
+
+ /* Create null terminated string */
+ strmake(buff, suffix->str, suffix->length);
+ /* Convert to lower case */
+ my_casedn_str(system_charset_info, buff);
+ /* Convert to characters usable in a file name */
+ res_length= strconvert(system_charset_info, buff,
+ &my_charset_filename, res, sizeof(res), &errors);
+
+ ext_pos= (size_t) (ext - info_file);
+ length-= (suffix->length - ext_pos); /* Leave place for extension */
+ p= res_file_name + ext_pos;
+ *p++= '-'; /* Add separator */
+ p= strmake(p, res, min((size_t) (length - (p - res_file_name)),
+ res_length));
+ /* Add back extension. We have checked above that there is space for it */
+ strmov(p, ext);
+ }
+}
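For example, "master.info" with connection name "slave1" becomes "master-slave1.info"; with append set, the suffix goes at the very end instead. A standalone, deliberately simplified illustration of the non-append case (no charset conversion or length clamping):

#include <cstdio>
#include <cstring>

int main()
{
  const char *info_file= "master.info", *suffix= "slave1";
  const char *ext= strrchr(info_file, '.');   /* ".info" */
  char result[64];

  snprintf(result, sizeof(result), "%.*s-%s%s",
           (int) (ext - info_file), info_file, suffix, ext);
  printf("%s\n", result);                     /* master-slave1.info */
  return 0;
}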
+
+
+Master_info_index::Master_info_index()
+{
+ size_t filename_length, dir_length;
+ /*
+ Create the Master_info index file by prepending 'multi-' before
+ the master_info_file file name.
+ */
+ fn_format(index_file_name, master_info_file, mysql_data_home,
+ "", MY_UNPACK_FILENAME);
+ filename_length= strlen(index_file_name) + 1; /* Count 0 byte */
+ dir_length= dirname_length(index_file_name);
+ bmove_upp((uchar*) index_file_name + filename_length + 6,
+ (uchar*) index_file_name + filename_length,
+ filename_length - dir_length);
+ memcpy(index_file_name + dir_length, "multi-", 6);
+
+ bzero((char*) &index_file, sizeof(index_file));
+}
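The bmove_upp()/memcpy() pair splices "multi-" in right after the directory part of the already-formatted name, so "/data/master.info" becomes "/data/multi-master.info". A standalone model of the same surgery, using memmove() for the overlapping shift:

#include <cstdio>
#include <cstring>

int main()
{
  char name[128]= "/data/master.info";
  size_t dir_len= 6;            /* dirname_length() stand-in: "/data/" */

  /* Shift the file-name part right by 6 bytes (including the NUL)... */
  memmove(name + dir_len + 6, name + dir_len, strlen(name + dir_len) + 1);
  /* ...and drop the prefix into the gap */
  memcpy(name + dir_len, "multi-", 6);

  printf("%s\n", name);         /* /data/multi-master.info */
  return 0;
}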
+
+Master_info_index::~Master_info_index()
+{
+ /* This will close connection for all objects in the cache */
+ my_hash_free(&master_info_hash);
+ end_io_cache(&index_file);
+ if (index_file.file > 0)
+ my_close(index_file.file, MYF(MY_WME));
+}
+
+
+/* Load all Master_info entries from the master.info.index file
+ * RETURN:
+ * 0 - all succeeded
+ * 1 - all failed
+ * 2 - some succeeded, some failed
+ */
+
+bool Master_info_index::init_all_master_info()
+{
+ int thread_mask;
+ int err_num= 0, succ_num= 0; // Number of failed / successful Master_info reads
+ char sign[MAX_CONNECTION_NAME];
+ File index_file_nr;
+ DBUG_ENTER("init_all_master_info");
+
+ if ((index_file_nr= my_open(index_file_name,
+ O_RDWR | O_CREAT | O_BINARY ,
+ MYF(MY_WME | ME_NOREFRESH))) < 0 ||
+ my_sync(index_file_nr, MYF(MY_WME)) ||
+ init_io_cache(&index_file, index_file_nr,
+ IO_SIZE, READ_CACHE,
+ my_seek(index_file_nr,0L,MY_SEEK_END,MYF(0)),
+ 0, MYF(MY_WME | MY_WAIT_IF_FULL)))
+ {
+ if (index_file_nr >= 0)
+ my_close(index_file_nr,MYF(0));
+
+ sql_print_error("Creation of Master_info index file '%s' failed",
+ index_file_name);
+ DBUG_RETURN(1);
+ }
+
+ /* Initialize Master_info Hash Table */
+ if (my_hash_init(&master_info_hash, system_charset_info,
+ MAX_REPLICATION_THREAD, 0, 0,
+ (my_hash_get_key) get_key_master_info,
+ (my_hash_free_key)free_key_master_info, HASH_UNIQUE))
+ {
+ sql_print_error("Initializing Master_info hash table failed");
+ DBUG_RETURN(1);
+ }
+
+ reinit_io_cache(&index_file, READ_CACHE, 0L,0,0);
+ while (!init_strvar_from_file(sign, sizeof(sign),
+ &index_file, NULL))
+ {
+ LEX_STRING connection_name;
+ Master_info *mi;
+ char buf_master_info_file[FN_REFLEN];
+ char buf_relay_log_info_file[FN_REFLEN];
+
+ connection_name.str= sign;
+ connection_name.length= strlen(sign);
+ if (!(mi= new Master_info(&connection_name, relay_log_recovery)) ||
+ mi->error())
+ {
+ delete mi;
+ DBUG_RETURN(1);
+ }
+
+ lock_slave_threads(mi);
+ init_thread_mask(&thread_mask,mi,0 /*not inverse*/);
+
+ create_logfile_name_with_suffix(buf_master_info_file, sizeof(buf_master_info_file),
+ master_info_file, 0, &connection_name);
+ create_logfile_name_with_suffix(buf_relay_log_info_file,
+ sizeof(buf_relay_log_info_file),
+ relay_log_info_file, 0, &connection_name);
+ if (global_system_variables.log_warnings > 1)
+ sql_print_information("Reading Master_info: '%s' Relay_info:'%s'",
+ buf_master_info_file, buf_relay_log_info_file);
+
+ if (init_master_info(mi, buf_master_info_file, buf_relay_log_info_file,
+ 0, thread_mask))
+ {
+ err_num++;
+ sql_print_error("Initialized Master_info from '%s' failed",
+ buf_master_info_file);
+ if (!master_info_index->get_master_info(&connection_name,
+ MYSQL_ERROR::WARN_LEVEL_NOTE))
+ {
+ /* Master_info is not in HASH; Add it */
+ if (master_info_index->add_master_info(mi, FALSE))
+ DBUG_RETURN(1); // not a plain return: keep the DBUG stack balanced
+ succ_num++;
+ unlock_slave_threads(mi);
+ }
+ else
+ {
+ /* Master_info already in HASH */
+ sql_print_error(ER(ER_CONNECTION_ALREADY_EXISTS),
+ (int) connection_name.length, connection_name.str);
+ unlock_slave_threads(mi);
+ delete mi;
+ }
+ continue;
+ }
+ else
+ {
+ /* Initialization of Master_info succeeded. Add it to HASH */
+ if (global_system_variables.log_warnings > 1)
+ sql_print_information("Initialized Master_info from '%s'",
+ buf_master_info_file);
+ if (master_info_index->get_master_info(&connection_name,
+ MYSQL_ERROR::WARN_LEVEL_NOTE))
+ {
+ /* Master_info was already registered */
+ sql_print_error(ER(ER_CONNECTION_ALREADY_EXISTS),
+ (int) connection_name.length, connection_name.str);
+ unlock_slave_threads(mi);
+ delete mi;
+ continue;
+ }
+
+ /* Master_info was not registered; add it */
+ if (master_info_index->add_master_info(mi, FALSE))
+ DBUG_RETURN(1); // not a plain return: keep the DBUG stack balanced
+ succ_num++;
+ unlock_slave_threads(mi);
+
+ if (!opt_skip_slave_start)
+ {
+ if (start_slave_threads(1 /* need mutex */,
+ 0 /* no wait for start*/,
+ mi,
+ buf_master_info_file,
+ buf_relay_log_info_file,
+ SLAVE_IO | SLAVE_SQL))
+ {
+ sql_print_error("Failed to create slave threads for connection '%.*s'",
+ (int) connection_name.length,
+ connection_name.str);
+ continue;
+ }
+ if (global_system_variables.log_warnings)
+ sql_print_information("Started replication for '%.*s'",
+ (int) connection_name.length,
+ connection_name.str);
+ }
+ }
+ }
+
+ if (!err_num) // No error while reading any Master_info entry
+ {
+ if (global_system_variables.log_warnings > 1)
+ sql_print_information("Reading of all Master_info entries succeeded");
+ DBUG_RETURN(0);
+ }
+ else if (succ_num) // Some successes and some failures
+ {
+ sql_print_warning("Reading of some Master_info entries failed");
+ DBUG_RETURN(2);
+ }
+ else // All failed
+ {
+ sql_print_error("Reading of all Master_info entries failed!");
+ DBUG_RETURN(1);
+ }
+}
+
+
+/* Write new master.info to master.info.index File */
+bool Master_info_index::write_master_name_to_index_file(LEX_STRING *name,
+ bool do_sync)
+{
+ DBUG_ASSERT(my_b_inited(&index_file) != 0);
+ DBUG_ENTER("write_master_name_to_index_file");
+
+ /* Don't write default slave to master_info.index */
+ if (name->length == 0)
+ DBUG_RETURN(0);
+
+ reinit_io_cache(&index_file, WRITE_CACHE,
+ my_b_filelength(&index_file), 0, 0);
+
+ if (my_b_write(&index_file, (uchar*) name->str, name->length) ||
+ my_b_write(&index_file, (uchar*) "\n", 1) ||
+ flush_io_cache(&index_file) ||
+ (do_sync && my_sync(index_file.file, MYF(MY_WME))))
+ {
+ sql_print_error("Write of new Master_info for '%.*s' to index file failed",
+ (int) name->length, name->str);
+ DBUG_RETURN(1);
+ }
+
+ DBUG_RETURN(0);
+}
+
+
+/**
+ Get Master_info for a connection
+
+ @param
+ connection_name Connection name
+ warning WARN_LEVEL_NOTE -> Don't print anything
+ WARN_LEVEL_WARN -> Issue warning if not exists
+ WARN_LEVEL_ERROR-> Issue error if not exists
+*/
+
+Master_info *
+Master_info_index::get_master_info(LEX_STRING *connection_name,
+ MYSQL_ERROR::enum_warning_level warning)
+{
+ Master_info *mi;
+ char buff[MAX_CONNECTION_NAME+1], *res;
+ uint buff_length;
+ DBUG_ENTER("get_master_info");
+ DBUG_PRINT("enter",
+ ("connection_name: '%.*s'", (int) connection_name->length,
+ connection_name->str));
+
+ /* Make name lower case for comparison */
+ res= strmake(buff, connection_name->str, connection_name->length);
+ my_casedn_str(system_charset_info, buff);
+ buff_length= (uint) (res-buff);
+
+ mi= (Master_info*) my_hash_search(&master_info_hash,
+ (uchar*) buff, buff_length);
+ if (!mi && warning != MYSQL_ERROR::WARN_LEVEL_NOTE)
+ {
+ my_error(WARN_NO_MASTER_INFO,
+ MYF(warning == MYSQL_ERROR::WARN_LEVEL_WARN ? ME_JUST_WARNING :
+ 0),
+ (int) connection_name->length,
+ connection_name->str);
+ }
+ DBUG_RETURN(mi);
+}
+
+
+/* Check whether Master_host and Master_port are duplicated or not */
+bool Master_info_index::check_duplicate_master_info(LEX_STRING *name_arg,
+ const char *host,
+ uint port)
+{
+ Master_info *mi;
+ DBUG_ENTER("check_duplicate_master_info");
+
+ /* Get full host and port name */
+ if ((mi= master_info_index->get_master_info(name_arg,
+ MYSQL_ERROR::WARN_LEVEL_NOTE)))
+ {
+ if (!host)
+ host= mi->host;
+ if (!port)
+ port= mi->port;
+ }
+ if (!host || !port)
+ DBUG_RETURN(FALSE); // Not comparable yet
+
+ for (uint i= 0; i < master_info_hash.records; ++i)
+ {
+ Master_info *tmp_mi;
+ tmp_mi= (Master_info *) my_hash_element(&master_info_hash, i);
+ if (tmp_mi == mi)
+ continue; // Current connection
+ if (!strcasecmp(host, tmp_mi->host) && port == tmp_mi->port)
+ {
+ my_error(ER_CONNECTION_ALREADY_EXISTS, MYF(0),
+ (int) name_arg->length,
+ name_arg->str,
+ (int) tmp_mi->connection_name.length,
+ tmp_mi->connection_name.str);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/* Add a Master_info class to Hash Table */
+bool Master_info_index::add_master_info(Master_info *mi, bool write_to_file)
+{
+ if (!my_hash_insert(&master_info_hash, (uchar*) mi))
+ {
+ if (global_system_variables.log_warnings > 1)
+ sql_print_information("Added new Master_info '%.*s' to hash table",
+ (int) mi->connection_name.length,
+ mi->connection_name.str);
+ if (write_to_file)
+ return write_master_name_to_index_file(&mi->connection_name, 1);
+ return FALSE;
+ }
+
+ /* Impossible error (OOM) ? */
+ sql_print_error("Adding new entry '%.*s' to master_info failed",
+ (int) mi->connection_name.length,
+ mi->connection_name.str);
+ return TRUE;
+}
+
+
+/**
+ Remove a Master_info class from the hash table
+
+ TODO: Change this to use my_rename() to make the file name creation
+ atomic
+*/
+
+bool Master_info_index::remove_master_info(LEX_STRING *name)
+{
+ Master_info* mi;
+ DBUG_ENTER("remove_master_info");
+
+ if ((mi= get_master_info(name, MYSQL_ERROR::WARN_LEVEL_WARN)))
+ {
+ // Delete Master_info and rewrite others to file
+ if (!my_hash_delete(&master_info_hash, (uchar*) mi))
+ {
+ File index_file_nr;
+
+ // Close the IO_CACHE and the file handle first
+ end_io_cache(&index_file);
+ my_close(index_file.file, MYF(MY_WME));
+
+ // Reopen File and truncate it
+ if ((index_file_nr= my_open(index_file_name,
+ O_RDWR | O_CREAT | O_TRUNC | O_BINARY ,
+ MYF(MY_WME))) < 0 ||
+ init_io_cache(&index_file, index_file_nr,
+ IO_SIZE, WRITE_CACHE,
+ my_seek(index_file_nr,0L,MY_SEEK_END,MYF(0)),
+ 0, MYF(MY_WME | MY_WAIT_IF_FULL)))
+ {
+ int error= my_errno;
+ if (index_file_nr >= 0)
+ my_close(index_file_nr,MYF(0));
+
+ sql_print_error("Create of Master Info Index file '%s' failed with "
+ "error: %M",
+ index_file_name, error);
+ DBUG_RETURN(TRUE);
+ }
+
+ // Rewrite Master_info.index
+ for (uint i= 0; i< master_info_hash.records; ++i)
+ {
+ Master_info *tmp_mi;
+ tmp_mi= (Master_info *) my_hash_element(&master_info_hash, i);
+ write_master_name_to_index_file(&tmp_mi->connection_name, 0);
+ }
+ my_sync(index_file_nr, MYF(MY_WME));
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/**
+ Master_info_index::give_error_if_slave_running()
+
+ @return
+ TRUE If some slave is running. An error is printed
+ FALSE No slave is running
+*/
+
+bool Master_info_index::give_error_if_slave_running()
+{
+ DBUG_ENTER("warn_if_slave_running");
+ mysql_mutex_assert_owner(&LOCK_active_mi);
+
+ for (uint i= 0; i< master_info_hash.records; ++i)
+ {
+ Master_info *mi;
+ mi= (Master_info *) my_hash_element(&master_info_hash, i);
+ if (mi->rli.slave_running != MYSQL_SLAVE_NOT_RUN)
+ {
+ my_error(ER_SLAVE_MUST_STOP, MYF(0), (int) mi->connection_name.length,
+ mi->connection_name.str);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/**
+ Master_info_index::start_all_slaves()
+
+ Start all slaves that are not already running.
+
+ @return
+ TRUE Error
+ FALSE Everything ok.
+*/
+
+bool Master_info_index::start_all_slaves(THD *thd)
+{
+ bool result= FALSE;
+ DBUG_ENTER("warn_if_slave_running");
+ mysql_mutex_assert_owner(&LOCK_active_mi);
+
+ for (uint i= 0; i< master_info_hash.records; ++i)
+ {
+ int error;
+ Master_info *mi;
+ mi= (Master_info *) my_hash_element(&master_info_hash, i);
+
+ /*
+ Try to start all slaves that are configured (host is defined)
+ and are not already running
+ */
+ if ((mi->slave_running != MYSQL_SLAVE_RUN_CONNECT ||
+ !mi->rli.slave_running) && *mi->host)
+ {
+ if ((error= start_slave(thd, mi, 1)))
+ {
+ my_error(ER_CANT_START_STOP_SLAVE, MYF(0),
+ "START",
+ (int) mi->connection_name.length,
+ mi->connection_name.str);
+ result= 1;
+ if (error < 0) // fatal error
+ break;
+ }
+ else
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ ER_SLAVE_STARTED, ER(ER_SLAVE_STARTED),
+ (int) mi->connection_name.length,
+ mi->connection_name.str);
+ }
+ }
+ DBUG_RETURN(result);
+}
+
+
+/**
+ Master_info_index::stop_all_slaves()
+
+ Stop all slaves that are running.
+
+ @return
+ TRUE Error
+ FALSE Everything ok.
+*/
+
+bool Master_info_index::stop_all_slaves(THD *thd)
+{
+ bool result= FALSE;
+ DBUG_ENTER("warn_if_slave_running");
+ mysql_mutex_assert_owner(&LOCK_active_mi);
+
+ for (uint i= 0; i< master_info_hash.records; ++i)
+ {
+ int error;
+ Master_info *mi;
+ mi= (Master_info *) my_hash_element(&master_info_hash, i);
+ if ((mi->slave_running != MYSQL_SLAVE_NOT_RUN ||
+ mi->rli.slave_running))
+ {
+ if ((error= stop_slave(thd, mi, 1)))
+ {
+ my_error(ER_CANT_START_STOP_SLAVE, MYF(0),
+ "STOP",
+ (int) mi->connection_name.length,
+ mi->connection_name.str);
+ result= 1;
+ if (error < 0) // Fatal error
+ break;
+ }
+ else
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ ER_SLAVE_STOPPED, ER(ER_SLAVE_STOPPED),
+ (int) mi->connection_name.length,
+ mi->connection_name.str);
+ }
+ }
+ DBUG_RETURN(result);
+}
#endif /* HAVE_REPLICATION */
diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h
index 64525f81603..d52b2992afd 100644
--- a/sql/rpl_mi.h
+++ b/sql/rpl_mi.h
@@ -59,16 +59,23 @@ typedef struct st_mysql MYSQL;
class Master_info : public Slave_reporting_capability
{
public:
- Master_info(bool is_slave_recovery);
+ Master_info(LEX_STRING *connection_name, bool is_slave_recovery);
~Master_info();
bool shall_ignore_server_id(ulong s_id);
void clear_in_memory_info(bool all);
+ bool error()
+ {
+ /* If malloc() in initialization failed */
+ return connection_name.str == 0;
+ }
/* the variables below are needed because we can change masters on the fly */
- char master_log_name[FN_REFLEN];
+ char master_log_name[FN_REFLEN+6]; /* Room for "multi-" prefix */
char host[HOSTNAME_LENGTH*SYSTEM_CHARSET_MBMAXLEN+1];
char user[USERNAME_LENGTH+1];
char password[MAX_PASSWORD_LENGTH*SYSTEM_CHARSET_MBMAXLEN+1];
+ LEX_STRING connection_name; /* User supplied connection name */
+ LEX_STRING cmp_connection_name; /* Connection name in lower case */
bool ssl; // enables use of SSL connection if true
char ssl_ca[FN_REFLEN], ssl_capath[FN_REFLEN], ssl_cert[FN_REFLEN];
char ssl_cipher[FN_REFLEN], ssl_key[FN_REFLEN];
@@ -130,5 +137,48 @@ int flush_master_info(Master_info* mi,
bool need_lock_relay_log);
int change_master_server_id_cmp(ulong *id1, ulong *id2);
+/*
+ Multi-master replication is handled through this struct.
+ Changes to it need to be protected by LOCK_active_mi.
+*/
+
+class Master_info_index
+{
+private:
+ IO_CACHE index_file;
+ char index_file_name[FN_REFLEN];
+
+public:
+ Master_info_index();
+ ~Master_info_index();
+
+ HASH master_info_hash;
+
+ bool init_all_master_info();
+ bool write_master_name_to_index_file(LEX_STRING *connection_name,
+ bool do_sync);
+
+ bool check_duplicate_master_info(LEX_STRING *connection_name,
+ const char *host, uint port);
+ bool add_master_info(Master_info *mi, bool write_to_file);
+ bool remove_master_info(LEX_STRING *connection_name);
+ Master_info *get_master_info(LEX_STRING *connection_name,
+ MYSQL_ERROR::enum_warning_level warning);
+ bool give_error_if_slave_running();
+ bool start_all_slaves(THD *thd);
+ bool stop_all_slaves(THD *thd);
+};
+
+bool check_master_connection_name(LEX_STRING *name);
+void create_logfile_name_with_suffix(char *res_file_name, uint length,
+ const char *info_file,
+ bool append,
+ LEX_STRING *suffix);
+
+uchar *get_key_master_info(Master_info *mi, size_t *length,
+ my_bool not_used __attribute__((unused)));
+void free_key_master_info(Master_info *mi);
+
+
#endif /* HAVE_REPLICATION */
#endif /* RPL_MI_H */
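A hypothetical usage sketch of the interface above (names are taken from this header, but locking and error handling are reduced to the bare pattern; treat it as an illustration, not server code):

void add_connection_example(Master_info_index *index)
{
  LEX_STRING name= { (char*) "slave1", 6 };

  mysql_mutex_lock(&LOCK_active_mi);          /* changes require this lock */
  Master_info *mi=
    index->get_master_info(&name, MYSQL_ERROR::WARN_LEVEL_NOTE);
  if (!mi)
  {
    mi= new Master_info(&name, false /* is_slave_recovery */);
    if (!mi->error())                         /* ctor's malloc() succeeded */
      index->add_master_info(mi, true /* write_to_file */);
    else
      delete mi;
  }
  mysql_mutex_unlock(&LOCK_active_mi);
}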
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index cbd7ac8f5ef..d5cd6a3efed 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -50,7 +50,7 @@ Relay_log_info::Relay_log_info(bool is_slave_recovery)
last_master_timestamp(0), slave_skip_counter(0),
abort_pos_wait(0), slave_run_id(0), sql_thd(0),
inited(0), abort_slave(0), slave_running(0), until_condition(UNTIL_NONE),
- until_log_pos(0), retried_trans(0),
+ until_log_pos(0), retried_trans(0), executed_entries(0),
tables_to_lock(0), tables_to_lock_count(0),
last_event_start_time(0), deferred_events(NULL),m_flags(0),
row_stmt_start_timestamp(0), long_find_row_note_printed(false),
@@ -58,6 +58,7 @@ Relay_log_info::Relay_log_info(bool is_slave_recovery)
{
DBUG_ENTER("Relay_log_info::Relay_log_info");
+ relay_log.is_relay_log= TRUE;
#ifdef HAVE_PSI_INTERFACE
relay_log.set_psi_keys(key_RELAYLOG_LOCK_index,
key_RELAYLOG_update_cond,
@@ -69,6 +70,7 @@ Relay_log_info::Relay_log_info(bool is_slave_recovery)
group_relay_log_name[0]= event_relay_log_name[0]=
group_master_log_name[0]= 0;
until_log_name[0]= ign_master_log_name_end[0]= 0;
+ max_relay_log_size= global_system_variables.max_relay_log_size;
bzero((char*) &info_file, sizeof(info_file));
bzero((char*) &cache_buf, sizeof(cache_buf));
cached_charset_invalidate();
@@ -149,15 +151,6 @@ int init_relay_log_info(Relay_log_info* rli,
event, in flush_master_info(mi, 1, ?).
*/
- /*
- For the maximum log size, we choose max_relay_log_size if it is
- non-zero, max_binlog_size otherwise. If later the user does SET
- GLOBAL on one of these variables, fix_max_binlog_size and
- fix_max_relay_log_size will reconsider the choice (for example
- if the user changes max_relay_log_size to zero, we have to
- switch to using max_binlog_size for the relay log) and update
- rli->relay_log.max_size (and mysql_bin_log.max_size).
- */
{
/* Reports an error and returns, if the --relay-log's path
is a directory.*/
@@ -207,19 +200,34 @@ a file name for --relay-log-index option", opt_relaylog_index_name);
name_warning_sent= 1;
}
- rli->relay_log.is_relay_log= TRUE;
+ /* For multi-master, add the connection name to relay log file names */
+ Master_info* mi= rli->mi;
+ char buf_relay_logname[FN_REFLEN], buf_relaylog_index_name_buff[FN_REFLEN];
+ char *buf_relaylog_index_name= opt_relaylog_index_name;
+
+ create_logfile_name_with_suffix(buf_relay_logname, sizeof(buf_relay_logname),
+ ln, 1, &mi->connection_name);
+ ln= buf_relay_logname;
+
+ if (opt_relaylog_index_name)
+ {
+ buf_relaylog_index_name= buf_relaylog_index_name_buff;
+ create_logfile_name_with_suffix(buf_relaylog_index_name_buff,
+ sizeof(buf_relaylog_index_name_buff),
+ opt_relaylog_index_name, 0,
+ &mi->connection_name);
+ }
/*
note, that if open() fails, we'll still have index file open
but a destructor will take care of that
*/
- if (rli->relay_log.open_index_file(opt_relaylog_index_name, ln, TRUE) ||
- rli->relay_log.open(ln, LOG_BIN, 0, SEQ_READ_APPEND, 0,
- (max_relay_log_size ? max_relay_log_size :
- max_binlog_size), 1, TRUE))
+ if (rli->relay_log.open_index_file(buf_relaylog_index_name, ln, TRUE) ||
+ rli->relay_log.open(ln, LOG_BIN, 0, SEQ_READ_APPEND,
+ mi->rli.max_relay_log_size, 1, TRUE))
{
mysql_mutex_unlock(&rli->data_lock);
- sql_print_error("Failed in open_log() called from init_relay_log_info()");
+ sql_print_error("Failed when trying to open logs for '%s' in init_relay_log_info(). Error: %M", ln, my_errno);
DBUG_RETURN(1);
}
}
@@ -998,28 +1006,37 @@ int purge_relay_logs(Relay_log_info* rli, THD *thd, bool just_reset,
rli->cur_log_fd= -1;
}
- if (rli->relay_log.reset_logs(thd))
+ if (rli->relay_log.reset_logs(thd, !just_reset))
{
*errmsg = "Failed during log reset";
error=1;
goto err;
}
- /* Save name of used relay log file */
- strmake(rli->group_relay_log_name, rli->relay_log.get_log_fname(),
- sizeof(rli->group_relay_log_name)-1);
- strmake(rli->event_relay_log_name, rli->relay_log.get_log_fname(),
- sizeof(rli->event_relay_log_name)-1);
- rli->group_relay_log_pos= rli->event_relay_log_pos= BIN_LOG_HEADER_SIZE;
- if (count_relay_log_space(rli))
- {
- *errmsg= "Error counting relay log space";
- error=1;
- goto err;
- }
if (!just_reset)
+ {
+ /* Save name of used relay log file */
+ strmake(rli->group_relay_log_name, rli->relay_log.get_log_fname(),
+ sizeof(rli->group_relay_log_name)-1);
+ strmake(rli->event_relay_log_name, rli->relay_log.get_log_fname(),
+ sizeof(rli->event_relay_log_name)-1);
+ rli->group_relay_log_pos= rli->event_relay_log_pos= BIN_LOG_HEADER_SIZE;
+ rli->log_space_total= 0;
+
+ if (count_relay_log_space(rli))
+ {
+ *errmsg= "Error counting relay log space";
+ error=1;
+ goto err;
+ }
error= init_relay_log_pos(rli, rli->group_relay_log_name,
rli->group_relay_log_pos,
0 /* do not need data lock */, errmsg, 0);
+ }
+ else
+ {
+ /* Ensure relay log names are not used */
+ rli->group_relay_log_name[0]= rli->event_relay_log_name[0]= 0;
+ }
err:
#ifndef DBUG_OFF
diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h
index b989283deb4..6144d37026b 100644
--- a/sql/rpl_rli.h
+++ b/sql/rpl_rli.h
@@ -25,7 +25,6 @@
struct RPL_TABLE_LIST;
class Master_info;
-extern uint sql_slave_skip_counter;
/****************************************************************************
@@ -229,10 +228,12 @@ public:
Needed for problems when slave stops and we want to restart it
skipping one or more events in the master log that have caused
errors, and have been manually applied by DBA already.
+ Must be ulong as it's referred to from set_var.cc
*/
- volatile uint32 slave_skip_counter;
+ volatile ulong slave_skip_counter;
volatile ulong abort_pos_wait; /* Incremented on change master */
volatile ulong slave_run_id; /* Incremented on slave start */
+ ulong max_relay_log_size;
mysql_mutex_t log_space_lock;
mysql_cond_t log_space_cond;
THD * sql_thd;
@@ -286,6 +287,7 @@ public:
slave started.
*/
ulong trans_retries, retried_trans;
+ ulong executed_entries; /* For SLAVE STATUS */
/*
If the end of the hot relay log is made of master's events ignored by the
diff --git a/sql/rpl_tblmap.cc b/sql/rpl_tblmap.cc
index b7ac1b2d091..7b55911d887 100644
--- a/sql/rpl_tblmap.cc
+++ b/sql/rpl_tblmap.cc
@@ -46,7 +46,7 @@ table_mapping::table_mapping()
offsetof(entry,table_id),sizeof(ulong),
0,0,0);
/* We don't preallocate any block, this is consistent with m_free=0 above */
- init_alloc_root(&m_mem_root, TABLE_ID_HASH_SIZE*sizeof(entry), 0);
+ init_alloc_root(&m_mem_root, TABLE_ID_HASH_SIZE*sizeof(entry), 0, MYF(0));
DBUG_VOID_RETURN;
}
diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc
index 069fac1c3ec..1b9e744bcc1 100644
--- a/sql/rpl_utility.cc
+++ b/sql/rpl_utility.cc
@@ -1117,7 +1117,7 @@ bool event_checksum_test(uchar *event_buf, ulong event_len, uint8 alg)
Deferred_log_events::Deferred_log_events(Relay_log_info *rli) : last_added(NULL)
{
- my_init_dynamic_array(&array, sizeof(Log_event *), 32, 16);
+ my_init_dynamic_array(&array, sizeof(Log_event *), 32, 16, MYF(0));
}
Deferred_log_events::~Deferred_log_events()
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 4e6354d9af4..2d3e0b7fec4 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -258,7 +258,7 @@ uchar *sys_var::value_ptr(THD *thd, enum_var_type type, LEX_STRING *base)
bool sys_var::set_default(THD *thd, enum_var_type type)
{
LEX_STRING empty={0,0};
- set_var var(type, 0, &empty, 0);
+ set_var var(type, this, &empty, 0);
if (type == OPT_GLOBAL || scope() == GLOBAL)
global_save_default(thd, &var);
diff --git a/sql/set_var.h b/sql/set_var.h
index f36c0f8b88f..f912c9fffad 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -68,13 +68,14 @@ public:
enum binlog_status_enum { VARIABLE_NOT_IN_BINLOG,
SESSION_VARIABLE_IN_BINLOG } binlog_status;
+ my_option option; ///< min, max, default values are stored here
+
protected:
typedef bool (*on_check_function)(sys_var *self, THD *thd, set_var *var);
typedef bool (*on_update_function)(sys_var *self, THD *thd, enum_var_type type);
int flags; ///< or'ed flag_enum values
const SHOW_TYPE show_val_type; ///< what value_ptr() returns for sql_show.cc
- my_option option; ///< min, max, default values are stored here
PolyLock *guard; ///< *second* lock that protects the variable
ptrdiff_t offset; ///< offset to the value from global_system_variables
on_check_function on_check;
@@ -296,6 +297,7 @@ extern SHOW_COMP_OPTION have_query_cache;
extern SHOW_COMP_OPTION have_geometry, have_rtree_keys;
extern SHOW_COMP_OPTION have_crypt;
extern SHOW_COMP_OPTION have_compress;
+extern SHOW_COMP_OPTION have_openssl;
/*
Prototypes for helper functions
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index d3b7e651b62..a6ebebeae39 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -51,79 +51,79 @@ ER_YES
spa "SI"
ukr "ТÐК"
ER_CANT_CREATE_FILE
- cze "Nemohu vytvo-Břit soubor '%-.200s' (chybový kód: %d)"
- dan "Kan ikke oprette filen '%-.200s' (Fejlkode: %d)"
- nla "Kan file '%-.200s' niet aanmaken (Errcode: %d)"
- eng "Can't create file '%-.200s' (errno: %d)"
- est "Ei suuda luua faili '%-.200s' (veakood: %d)"
- fre "Ne peut créer le fichier '%-.200s' (Errcode: %d)"
- ger "Kann Datei '%-.200s' nicht erzeugen (Fehler: %d)"
- greek "ΑδÏνατη η δημιουÏγία του αÏχείου '%-.200s' (κωδικός λάθους: %d)"
- hun "A '%-.200s' file nem hozhato letre (hibakod: %d)"
- ita "Impossibile creare il file '%-.200s' (errno: %d)"
- jpn "'%-.200s' ファイルãŒä½œã‚Œã¾ã›ã‚“ (errno: %d)"
- kor "í™”ì¼ '%-.200s'를 만들지 못했습니다. (ì—러번호: %d)"
- nor "Kan ikke opprette fila '%-.200s' (Feilkode: %d)"
- norwegian-ny "Kan ikkje opprette fila '%-.200s' (Feilkode: %d)"
- pol "Nie można stworzyć pliku '%-.200s' (Kod błędu: %d)"
- por "Não pode criar o arquivo '%-.200s' (erro no. %d)"
- rum "Nu pot sa creez fisierul '%-.200s' (Eroare: %d)"
- rus "Ðевозможно Ñоздать файл '%-.200s' (ошибка: %d)"
- serbian "Ne mogu da kreiram file '%-.200s' (errno: %d)"
- slo "Nemôžem vytvoriť súbor '%-.200s' (chybový kód: %d)"
- spa "No puedo crear archivo '%-.200s' (Error: %d)"
- swe "Kan inte skapa filen '%-.200s' (Felkod: %d)"
- ukr "Ðе можу Ñтворити файл '%-.200s' (помилка: %d)"
+ cze "Nemohu vytvo-Břit soubor '%-.200s' (chybový kód: %M)"
+ dan "Kan ikke oprette filen '%-.200s' (Fejlkode: %M)"
+ nla "Kan file '%-.200s' niet aanmaken (Errcode: %M)"
+ eng "Can't create file '%-.200s' (errno: %M)"
+ est "Ei suuda luua faili '%-.200s' (veakood: %M)"
+ fre "Ne peut créer le fichier '%-.200s' (Errcode: %M)"
+ ger "Kann Datei '%-.200s' nicht erzeugen (Fehler: %M)"
+ greek "ΑδÏνατη η δημιουÏγία του αÏχείου '%-.200s' (κωδικός λάθους: %M)"
+ hun "A '%-.200s' file nem hozhato letre (hibakod: %M)"
+ ita "Impossibile creare il file '%-.200s' (errno: %M)"
+ jpn "'%-.200s' ファイルãŒä½œã‚Œã¾ã›ã‚“ (errno: %M)"
+ kor "í™”ì¼ '%-.200s'를 만들지 못했습니다. (ì—러번호: %M)"
+ nor "Kan ikke opprette fila '%-.200s' (Feilkode: %M)"
+ norwegian-ny "Kan ikkje opprette fila '%-.200s' (Feilkode: %M)"
+ pol "Nie można stworzyć pliku '%-.200s' (Kod błędu: %M)"
+ por "Não pode criar o arquivo '%-.200s' (erro no. %M)"
+ rum "Nu pot sa creez fisierul '%-.200s' (Eroare: %M)"
+ rus "Ðевозможно Ñоздать файл '%-.200s' (ошибка: %M)"
+ serbian "Ne mogu da kreiram file '%-.200s' (errno: %M)"
+ slo "Nemôžem vytvoriť súbor '%-.200s' (chybový kód: %M)"
+ spa "No puedo crear archivo '%-.200s' (Error: %M)"
+ swe "Kan inte skapa filen '%-.200s' (Felkod: %M)"
+ ukr "Ðе можу Ñтворити файл '%-.200s' (помилка: %M)"
ER_CANT_CREATE_TABLE
- cze "Nemohu vytvo-Břit tabulku '%-.200s' (chybový kód: %d)"
- dan "Kan ikke oprette tabellen '%-.200s' (Fejlkode: %d)"
- nla "Kan tabel '%-.200s' niet aanmaken (Errcode: %d)"
- eng "Can't create table '%-.200s' (errno: %d)"
- jps "'%-.200s' テーブルãŒä½œã‚Œã¾ã›ã‚“.(errno: %d)",
- est "Ei suuda luua tabelit '%-.200s' (veakood: %d)"
- fre "Ne peut créer la table '%-.200s' (Errcode: %d)"
- ger "Kann Tabelle '%-.200s' nicht erzeugen (Fehler: %d)"
- greek "ΑδÏνατη η δημιουÏγία του πίνακα '%-.200s' (κωδικός λάθους: %d)"
- hun "A '%-.200s' tabla nem hozhato letre (hibakod: %d)"
- ita "Impossibile creare la tabella '%-.200s' (errno: %d)"
- jpn "'%-.200s' テーブルãŒä½œã‚Œã¾ã›ã‚“.(errno: %d)"
- kor "í…Œì´ë¸” '%-.200s'를 만들지 못했습니다. (ì—러번호: %d)"
- nor "Kan ikke opprette tabellen '%-.200s' (Feilkode: %d)"
- norwegian-ny "Kan ikkje opprette tabellen '%-.200s' (Feilkode: %d)"
- pol "Nie można stworzyć tabeli '%-.200s' (Kod błędu: %d)"
- por "Não pode criar a tabela '%-.200s' (erro no. %d)"
- rum "Nu pot sa creez tabla '%-.200s' (Eroare: %d)"
- rus "Ðевозможно Ñоздать таблицу '%-.200s' (ошибка: %d)"
- serbian "Ne mogu da kreiram tabelu '%-.200s' (errno: %d)"
- slo "Nemôžem vytvoriť tabuľku '%-.200s' (chybový kód: %d)"
- spa "No puedo crear tabla '%-.200s' (Error: %d)"
- swe "Kan inte skapa tabellen '%-.200s' (Felkod: %d)"
- ukr "Ðе можу Ñтворити таблицю '%-.200s' (помилка: %d)"
+ cze "Nemohu vytvo-Břit tabulku '%-.200s' (chybový kód: %M)"
+ dan "Kan ikke oprette tabellen '%-.200s' (Fejlkode: %M)"
+ nla "Kan tabel '%-.200s' niet aanmaken (Errcode: %M)"
+ eng "Can't create table '%-.200s' (errno: %M)"
+ jps "'%-.200s' テーブルãŒä½œã‚Œã¾ã›ã‚“.(errno: %M)",
+ est "Ei suuda luua tabelit '%-.200s' (veakood: %M)"
+ fre "Ne peut créer la table '%-.200s' (Errcode: %M)"
+ ger "Kann Tabelle '%-.200s' nicht erzeugen (Fehler: %M)"
+ greek "ΑδÏνατη η δημιουÏγία του πίνακα '%-.200s' (κωδικός λάθους: %M)"
+ hun "A '%-.200s' tabla nem hozhato letre (hibakod: %M)"
+ ita "Impossibile creare la tabella '%-.200s' (errno: %M)"
+ jpn "'%-.200s' テーブルãŒä½œã‚Œã¾ã›ã‚“.(errno: %M)"
+ kor "í…Œì´ë¸” '%-.200s'를 만들지 못했습니다. (ì—러번호: %M)"
+ nor "Kan ikke opprette tabellen '%-.200s' (Feilkode: %M)"
+ norwegian-ny "Kan ikkje opprette tabellen '%-.200s' (Feilkode: %M)"
+ pol "Nie można stworzyć tabeli '%-.200s' (Kod błędu: %M)"
+ por "Não pode criar a tabela '%-.200s' (erro no. %M)"
+ rum "Nu pot sa creez tabla '%-.200s' (Eroare: %M)"
+ rus "Ðевозможно Ñоздать таблицу '%-.200s' (ошибка: %M)"
+ serbian "Ne mogu da kreiram tabelu '%-.200s' (errno: %M)"
+ slo "Nemôžem vytvoriť tabuľku '%-.200s' (chybový kód: %M)"
+ spa "No puedo crear tabla '%-.200s' (Error: %M)"
+ swe "Kan inte skapa tabellen '%-.200s' (Felkod: %M)"
+ ukr "Ðе можу Ñтворити таблицю '%-.200s' (помилка: %M)"
ER_CANT_CREATE_DB
- cze "Nemohu vytvo-Břit databázi '%-.192s' (chybový kód: %d)"
- dan "Kan ikke oprette databasen '%-.192s' (Fejlkode: %d)"
- nla "Kan database '%-.192s' niet aanmaken (Errcode: %d)"
- eng "Can't create database '%-.192s' (errno: %d)"
- jps "'%-.192s' データベースãŒä½œã‚Œã¾ã›ã‚“ (errno: %d)",
- est "Ei suuda luua andmebaasi '%-.192s' (veakood: %d)"
- fre "Ne peut créer la base '%-.192s' (Erreur %d)"
- ger "Kann Datenbank '%-.192s' nicht erzeugen (Fehler: %d)"
- greek "ΑδÏνατη η δημιουÏγία της βάσης δεδομένων '%-.192s' (κωδικός λάθους: %d)"
- hun "Az '%-.192s' adatbazis nem hozhato letre (hibakod: %d)"
- ita "Impossibile creare il database '%-.192s' (errno: %d)"
- jpn "'%-.192s' データベースãŒä½œã‚Œã¾ã›ã‚“ (errno: %d)"
- kor "ë°ì´íƒ€ë² ì´ìФ '%-.192s'를 만들지 못했습니다.. (ì—러번호: %d)"
- nor "Kan ikke opprette databasen '%-.192s' (Feilkode: %d)"
- norwegian-ny "Kan ikkje opprette databasen '%-.192s' (Feilkode: %d)"
- pol "Nie można stworzyć bazy danych '%-.192s' (Kod błędu: %d)"
- por "Não pode criar o banco de dados '%-.192s' (erro no. %d)"
- rum "Nu pot sa creez baza de date '%-.192s' (Eroare: %d)"
- rus "Ðевозможно Ñоздать базу данных '%-.192s' (ошибка: %d)"
- serbian "Ne mogu da kreiram bazu '%-.192s' (errno: %d)"
- slo "Nemôžem vytvoriť databázu '%-.192s' (chybový kód: %d)"
- spa "No puedo crear base de datos '%-.192s' (Error: %d)"
- swe "Kan inte skapa databasen '%-.192s' (Felkod: %d)"
- ukr "Ðе можу Ñтворити базу данних '%-.192s' (помилка: %d)"
+ cze "Nemohu vytvo-Břit databázi '%-.192s' (chybový kód: %M)"
+ dan "Kan ikke oprette databasen '%-.192s' (Fejlkode: %M)"
+ nla "Kan database '%-.192s' niet aanmaken (Errcode: %M)"
+ eng "Can't create database '%-.192s' (errno: %M)"
+ jps "'%-.192s' データベースãŒä½œã‚Œã¾ã›ã‚“ (errno: %M)",
+ est "Ei suuda luua andmebaasi '%-.192s' (veakood: %M)"
+ fre "Ne peut créer la base '%-.192s' (Erreur %M)"
+ ger "Kann Datenbank '%-.192s' nicht erzeugen (Fehler: %M)"
+ greek "ΑδÏνατη η δημιουÏγία της βάσης δεδομένων '%-.192s' (κωδικός λάθους: %M)"
+ hun "Az '%-.192s' adatbazis nem hozhato letre (hibakod: %M)"
+ ita "Impossibile creare il database '%-.192s' (errno: %M)"
+ jpn "'%-.192s' データベースãŒä½œã‚Œã¾ã›ã‚“ (errno: %M)"
+ kor "ë°ì´íƒ€ë² ì´ìФ '%-.192s'를 만들지 못했습니다.. (ì—러번호: %M)"
+ nor "Kan ikke opprette databasen '%-.192s' (Feilkode: %M)"
+ norwegian-ny "Kan ikkje opprette databasen '%-.192s' (Feilkode: %M)"
+ pol "Nie można stworzyć bazy danych '%-.192s' (Kod błędu: %M)"
+ por "Não pode criar o banco de dados '%-.192s' (erro no. %M)"
+ rum "Nu pot sa creez baza de date '%-.192s' (Eroare: %M)"
+ rus "Ðевозможно Ñоздать базу данных '%-.192s' (ошибка: %M)"
+ serbian "Ne mogu da kreiram bazu '%-.192s' (errno: %M)"
+ slo "Nemôžem vytvoriť databázu '%-.192s' (chybový kód: %M)"
+ spa "No puedo crear base de datos '%-.192s' (Error: %M)"
+ swe "Kan inte skapa databasen '%-.192s' (Felkod: %M)"
+ ukr "Ðе можу Ñтворити базу данних '%-.192s' (помилка: %M)"
ER_DB_CREATE_EXISTS
cze "Nemohu vytvo-Břit databázi '%-.192s'; databáze již existuje"
dan "Kan ikke oprette databasen '%-.192s'; databasen eksisterer"
@@ -175,80 +175,80 @@ ER_DB_DROP_EXISTS
swe "Kan inte radera databasen '%-.192s'; databasen finns inte"
ukr "Ðе можу видалити базу данних '%-.192s'. База данних не Ñ–Ñнує"
ER_DB_DROP_DELETE
- cze "Chyba p-Bři rušení databáze (nemohu vymazat '%-.192s', chyba %d)"
- dan "Fejl ved sletning (drop) af databasen (kan ikke slette '%-.192s', Fejlkode %d)"
- nla "Fout bij verwijderen database (kan '%-.192s' niet verwijderen, Errcode: %d)"
- eng "Error dropping database (can't delete '%-.192s', errno: %d)"
- jps "データベース破棄エラー ('%-.192s' を削除ã§ãã¾ã›ã‚“, errno: %d)",
- est "Viga andmebaasi kustutamisel (ei suuda kustutada faili '%-.192s', veakood: %d)"
- fre "Ne peut effacer la base '%-.192s' (erreur %d)"
- ger "Fehler beim Löschen der Datenbank ('%-.192s' kann nicht gelöscht werden, Fehler: %d)"
- greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κατά τη διαγÏαφή της βάσης δεδομένων (αδÏνατη η διαγÏαφή '%-.192s', κωδικός λάθους: %d)"
- hun "Adatbazis megszuntetesi hiba ('%-.192s' nem torolheto, hibakod: %d)"
- ita "Errore durante la cancellazione del database (impossibile cancellare '%-.192s', errno: %d)"
- jpn "データベース破棄エラー ('%-.192s' を削除ã§ãã¾ã›ã‚“, errno: %d)"
- kor "ë°ì´íƒ€ë² ì´ìФ 제거 ì—러('%-.192s'를 삭제할 수 ì—†ì니다, ì—러번호: %d)"
- nor "Feil ved fjerning (drop) av databasen (kan ikke slette '%-.192s', feil %d)"
- norwegian-ny "Feil ved fjerning (drop) av databasen (kan ikkje slette '%-.192s', feil %d)"
- pol "Bł?d podczas usuwania bazy danych (nie można usun?ć '%-.192s', bł?d %d)"
- por "Erro ao eliminar banco de dados (não pode eliminar '%-.192s' - erro no. %d)"
- rum "Eroare dropuind baza de date (nu pot sa sterg '%-.192s', Eroare: %d)"
- rus "Ошибка при удалении базы данных (невозможно удалить '%-.192s', ошибка: %d)"
- serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem '%-.192s', errno: %d)"
- slo "Chyba pri mazaní databázy (nemôžem zmazať '%-.192s', chybový kód: %d)"
- spa "Error eliminando la base de datos(no puedo borrar '%-.192s', error %d)"
- swe "Fel vid radering av databasen (Kan inte radera '%-.192s'. Felkod: %d)"
- ukr "Ðе можу видалити базу данних (Ðе можу видалити '%-.192s', помилка: %d)"
+ cze "Chyba p-Bři rušení databáze (nemohu vymazat '%-.192s', chyba %M)"
+ dan "Fejl ved sletning (drop) af databasen (kan ikke slette '%-.192s', Fejlkode %M)"
+ nla "Fout bij verwijderen database (kan '%-.192s' niet verwijderen, Errcode: %M)"
+ eng "Error dropping database (can't delete '%-.192s', errno: %M)"
+ jps "データベース破棄エラー ('%-.192s' を削除ã§ãã¾ã›ã‚“, errno: %M)",
+ est "Viga andmebaasi kustutamisel (ei suuda kustutada faili '%-.192s', veakood: %M)"
+ fre "Ne peut effacer la base '%-.192s' (erreur %M)"
+ ger "Fehler beim Löschen der Datenbank ('%-.192s' kann nicht gelöscht werden, Fehler: %M)"
+ greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κατά τη διαγÏαφή της βάσης δεδομένων (αδÏνατη η διαγÏαφή '%-.192s', κωδικός λάθους: %M)"
+ hun "Adatbazis megszuntetesi hiba ('%-.192s' nem torolheto, hibakod: %M)"
+ ita "Errore durante la cancellazione del database (impossibile cancellare '%-.192s', errno: %M)"
+ jpn "データベース破棄エラー ('%-.192s' を削除ã§ãã¾ã›ã‚“, errno: %M)"
+ kor "ë°ì´íƒ€ë² ì´ìФ 제거 ì—러('%-.192s'를 삭제할 수 ì—†ì니다, ì—러번호: %M)"
+ nor "Feil ved fjerning (drop) av databasen (kan ikke slette '%-.192s', feil %M)"
+ norwegian-ny "Feil ved fjerning (drop) av databasen (kan ikkje slette '%-.192s', feil %M)"
+ pol "Bł?d podczas usuwania bazy danych (nie można usun?ć '%-.192s', bł?d %M)"
+ por "Erro ao eliminar banco de dados (não pode eliminar '%-.192s' - erro no. %M)"
+ rum "Eroare dropuind baza de date (nu pot sa sterg '%-.192s', Eroare: %M)"
+ rus "Ошибка при удалении базы данных (невозможно удалить '%-.192s', ошибка: %M)"
+ serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem '%-.192s', errno: %M)"
+ slo "Chyba pri mazaní databázy (nemôžem zmazať '%-.192s', chybový kód: %M)"
+ spa "Error eliminando la base de datos(no puedo borrar '%-.192s', error %M)"
+ swe "Fel vid radering av databasen (Kan inte radera '%-.192s'. Felkod: %M)"
+ ukr "Ðе можу видалити базу данних (Ðе можу видалити '%-.192s', помилка: %M)"
ER_DB_DROP_RMDIR
- cze "Chyba p-Bři rušení databáze (nemohu vymazat adresář '%-.192s', chyba %d)"
- dan "Fejl ved sletting af database (kan ikke slette folderen '%-.192s', Fejlkode %d)"
- nla "Fout bij verwijderen database (kan rmdir '%-.192s' niet uitvoeren, Errcode: %d)"
- eng "Error dropping database (can't rmdir '%-.192s', errno: %d)"
- jps "データベース破棄エラー ('%-.192s' ã‚’ rmdir ã§ãã¾ã›ã‚“, errno: %d)",
- est "Viga andmebaasi kustutamisel (ei suuda kustutada kataloogi '%-.192s', veakood: %d)"
- fre "Erreur en effaçant la base (rmdir '%-.192s', erreur %d)"
- ger "Fehler beim Löschen der Datenbank (Verzeichnis '%-.192s' kann nicht gelöscht werden, Fehler: %d)"
- greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κατά τη διαγÏαφή της βάσης δεδομένων (αδÏνατη η διαγÏαφή του φακέλλου '%-.192s', κωδικός λάθους: %d)"
- hun "Adatbazis megszuntetesi hiba ('%-.192s' nem szuntetheto meg, hibakod: %d)"
- ita "Errore durante la cancellazione del database (impossibile rmdir '%-.192s', errno: %d)"
- jpn "データベース破棄エラー ('%-.192s' ã‚’ rmdir ã§ãã¾ã›ã‚“, errno: %d)"
- kor "ë°ì´íƒ€ë² ì´ìФ 제거 ì—러(rmdir '%-.192s'를 í•  수 ì—†ì니다, ì—러번호: %d)"
- nor "Feil ved sletting av database (kan ikke slette katalogen '%-.192s', feil %d)"
- norwegian-ny "Feil ved sletting av database (kan ikkje slette katalogen '%-.192s', feil %d)"
- pol "Bł?d podczas usuwania bazy danych (nie można wykonać rmdir '%-.192s', bł?d %d)"
- por "Erro ao eliminar banco de dados (não pode remover diretório '%-.192s' - erro no. %d)"
- rum "Eroare dropuind baza de date (nu pot sa rmdir '%-.192s', Eroare: %d)"
- rus "Ðевозможно удалить базу данных (невозможно удалить каталог '%-.192s', ошибка: %d)"
- serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem direktorijum '%-.192s', errno: %d)"
- slo "Chyba pri mazaní databázy (nemôžem vymazať adresár '%-.192s', chybový kód: %d)"
- spa "Error eliminando la base de datos (No puedo borrar directorio '%-.192s', error %d)"
- swe "Fel vid radering av databasen (Kan inte radera biblioteket '%-.192s'. Felkod: %d)"
- ukr "Ðе можу видалити базу данних (Ðе можу видалити теку '%-.192s', помилка: %d)"
+ cze "Chyba p-Bři rušení databáze (nemohu vymazat adresář '%-.192s', chyba %M)"
+ dan "Fejl ved sletting af database (kan ikke slette folderen '%-.192s', Fejlkode %M)"
+ nla "Fout bij verwijderen database (kan rmdir '%-.192s' niet uitvoeren, Errcode: %M)"
+ eng "Error dropping database (can't rmdir '%-.192s', errno: %M)"
+ jps "データベース破棄エラー ('%-.192s' ã‚’ rmdir ã§ãã¾ã›ã‚“, errno: %M)",
+ est "Viga andmebaasi kustutamisel (ei suuda kustutada kataloogi '%-.192s', veakood: %M)"
+ fre "Erreur en effaçant la base (rmdir '%-.192s', erreur %M)"
+ ger "Fehler beim Löschen der Datenbank (Verzeichnis '%-.192s' kann nicht gelöscht werden, Fehler: %M)"
+ greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κατά τη διαγÏαφή της βάσης δεδομένων (αδÏνατη η διαγÏαφή του φακέλλου '%-.192s', κωδικός λάθους: %M)"
+ hun "Adatbazis megszuntetesi hiba ('%-.192s' nem szuntetheto meg, hibakod: %M)"
+ ita "Errore durante la cancellazione del database (impossibile rmdir '%-.192s', errno: %M)"
+ jpn "データベース破棄エラー ('%-.192s' ã‚’ rmdir ã§ãã¾ã›ã‚“, errno: %M)"
+ kor "ë°ì´íƒ€ë² ì´ìФ 제거 ì—러(rmdir '%-.192s'를 í•  수 ì—†ì니다, ì—러번호: %M)"
+ nor "Feil ved sletting av database (kan ikke slette katalogen '%-.192s', feil %M)"
+ norwegian-ny "Feil ved sletting av database (kan ikkje slette katalogen '%-.192s', feil %M)"
+ pol "Bł?d podczas usuwania bazy danych (nie można wykonać rmdir '%-.192s', bł?d %M)"
+ por "Erro ao eliminar banco de dados (não pode remover diretório '%-.192s' - erro no. %M)"
+ rum "Eroare dropuind baza de date (nu pot sa rmdir '%-.192s', Eroare: %M)"
+ rus "Ðевозможно удалить базу данных (невозможно удалить каталог '%-.192s', ошибка: %M)"
+ serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem direktorijum '%-.192s', errno: %M)"
+ slo "Chyba pri mazaní databázy (nemôžem vymazať adresár '%-.192s', chybový kód: %M)"
+ spa "Error eliminando la base de datos (No puedo borrar directorio '%-.192s', error %M)"
+ swe "Fel vid radering av databasen (Kan inte radera biblioteket '%-.192s'. Felkod: %M)"
+ ukr "Ðе можу видалити базу данних (Ðе можу видалити теку '%-.192s', помилка: %M)"
ER_CANT_DELETE_FILE
- cze "Chyba p-Bři výmazu '%-.192s' (chybový kód: %d)"
- dan "Fejl ved sletning af '%-.192s' (Fejlkode: %d)"
- nla "Fout bij het verwijderen van '%-.192s' (Errcode: %d)"
- eng "Error on delete of '%-.192s' (errno: %d)"
- jps "'%-.192s' ã®å‰Šé™¤ãŒã‚¨ãƒ©ãƒ¼ (errno: %d)",
- est "Viga '%-.192s' kustutamisel (veakood: %d)"
- fre "Erreur en effaçant '%-.192s' (Errcode: %d)"
- ger "Fehler beim Löschen von '%-.192s' (Fehler: %d)"
- greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κατά τη διαγÏαφή '%-.192s' (κωδικός λάθους: %d)"
- hun "Torlesi hiba: '%-.192s' (hibakod: %d)"
- ita "Errore durante la cancellazione di '%-.192s' (errno: %d)"
- jpn "'%-.192s' ã®å‰Šé™¤ãŒã‚¨ãƒ©ãƒ¼ (errno: %d)"
- kor "'%-.192s' ì‚­ì œ 중 ì—러 (ì—러번호: %d)"
- nor "Feil ved sletting av '%-.192s' (Feilkode: %d)"
- norwegian-ny "Feil ved sletting av '%-.192s' (Feilkode: %d)"
- pol "Bł?d podczas usuwania '%-.192s' (Kod błędu: %d)"
- por "Erro na remoção de '%-.192s' (erro no. %d)"
- rum "Eroare incercind sa delete '%-.192s' (Eroare: %d)"
- rus "Ошибка при удалении '%-.192s' (ошибка: %d)"
- serbian "Greška pri brisanju '%-.192s' (errno: %d)"
- slo "Chyba pri mazaní '%-.192s' (chybový kód: %d)"
- spa "Error en el borrado de '%-.192s' (Error: %d)"
- swe "Kan inte radera filen '%-.192s' (Felkod: %d)"
- ukr "Ðе можу видалити '%-.192s' (помилка: %d)"
+ cze "Chyba p-Bři výmazu '%-.192s' (chybový kód: %M)"
+ dan "Fejl ved sletning af '%-.192s' (Fejlkode: %M)"
+ nla "Fout bij het verwijderen van '%-.192s' (Errcode: %M)"
+ eng "Error on delete of '%-.192s' (errno: %M)"
+ jps "'%-.192s' ã®å‰Šé™¤ãŒã‚¨ãƒ©ãƒ¼ (errno: %M)",
+ est "Viga '%-.192s' kustutamisel (veakood: %M)"
+ fre "Erreur en effaçant '%-.192s' (Errcode: %M)"
+ ger "Fehler beim Löschen von '%-.192s' (Fehler: %M)"
+ greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κατά τη διαγÏαφή '%-.192s' (κωδικός λάθους: %M)"
+ hun "Torlesi hiba: '%-.192s' (hibakod: %M)"
+ ita "Errore durante la cancellazione di '%-.192s' (errno: %M)"
+ jpn "'%-.192s' ã®å‰Šé™¤ãŒã‚¨ãƒ©ãƒ¼ (errno: %M)"
+ kor "'%-.192s' ì‚­ì œ 중 ì—러 (ì—러번호: %M)"
+ nor "Feil ved sletting av '%-.192s' (Feilkode: %M)"
+ norwegian-ny "Feil ved sletting av '%-.192s' (Feilkode: %M)"
+ pol "Bł?d podczas usuwania '%-.192s' (Kod błędu: %M)"
+ por "Erro na remoção de '%-.192s' (erro no. %M)"
+ rum "Eroare incercind sa delete '%-.192s' (Eroare: %M)"
+ rus "Ошибка при удалении '%-.192s' (ошибка: %M)"
+ serbian "Greška pri brisanju '%-.192s' (errno: %M)"
+ slo "Chyba pri mazaní '%-.192s' (chybový kód: %M)"
+ spa "Error en el borrado de '%-.192s' (Error: %M)"
+ swe "Kan inte radera filen '%-.192s' (Felkod: %M)"
+ ukr "Ðе можу видалити '%-.192s' (помилка: %M)"
ER_CANT_FIND_SYSTEM_REC
cze "Nemohu -BÄíst záznam v systémové tabulce"
dan "Kan ikke læse posten i systemfolderen"
@@ -275,180 +275,180 @@ ER_CANT_FIND_SYSTEM_REC
swe "Hittar inte posten i systemregistret"
ukr "Ðе можу зчитати Ð·Ð°Ð¿Ð¸Ñ Ð· ÑиÑтемної таблиці"
ER_CANT_GET_STAT
- cze "Nemohu z-Bískat stav '%-.200s' (chybový kód: %d)"
- dan "Kan ikke læse status af '%-.200s' (Fejlkode: %d)"
- nla "Kan de status niet krijgen van '%-.200s' (Errcode: %d)"
- eng "Can't get status of '%-.200s' (errno: %d)"
- jps "'%-.200s' ã®ã‚¹ãƒ†ã‚¤ã‚¿ã‚¹ãŒå¾—られã¾ã›ã‚“. (errno: %d)",
- est "Ei suuda lugeda '%-.200s' olekut (veakood: %d)"
- fre "Ne peut obtenir le status de '%-.200s' (Errcode: %d)"
- ger "Kann Status von '%-.200s' nicht ermitteln (Fehler: %d)"
- greek "ΑδÏνατη η λήψη πληÏοφοÏιών για την κατάσταση του '%-.200s' (κωδικός λάθους: %d)"
- hun "A(z) '%-.200s' statusza nem allapithato meg (hibakod: %d)"
- ita "Impossibile leggere lo stato di '%-.200s' (errno: %d)"
- jpn "'%-.200s' ã®ã‚¹ãƒ†ã‚¤ã‚¿ã‚¹ãŒå¾—られã¾ã›ã‚“. (errno: %d)"
- kor "'%-.200s'ì˜ ìƒíƒœë¥¼ 얻지 못했습니다. (ì—러번호: %d)"
- nor "Kan ikke lese statusen til '%-.200s' (Feilkode: %d)"
- norwegian-ny "Kan ikkje lese statusen til '%-.200s' (Feilkode: %d)"
- pol "Nie można otrzymać statusu '%-.200s' (Kod błędu: %d)"
- por "Não pode obter o status de '%-.200s' (erro no. %d)"
- rum "Nu pot sa obtin statusul lui '%-.200s' (Eroare: %d)"
- rus "Ðевозможно получить ÑтатуÑную информацию о '%-.200s' (ошибка: %d)"
- serbian "Ne mogu da dobijem stanje file-a '%-.200s' (errno: %d)"
- slo "Nemôžem zistiť stav '%-.200s' (chybový kód: %d)"
- spa "No puedo obtener el estado de '%-.200s' (Error: %d)"
- swe "Kan inte läsa filinformationen (stat) från '%-.200s' (Felkod: %d)"
- ukr "Ðе можу отримати ÑÑ‚Ð°Ñ‚ÑƒÑ '%-.200s' (помилка: %d)"
+ cze "Nemohu z-Bískat stav '%-.200s' (chybový kód: %M)"
+ dan "Kan ikke læse status af '%-.200s' (Fejlkode: %M)"
+ nla "Kan de status niet krijgen van '%-.200s' (Errcode: %M)"
+ eng "Can't get status of '%-.200s' (errno: %M)"
+ jps "'%-.200s' ã®ã‚¹ãƒ†ã‚¤ã‚¿ã‚¹ãŒå¾—られã¾ã›ã‚“. (errno: %M)",
+ est "Ei suuda lugeda '%-.200s' olekut (veakood: %M)"
+ fre "Ne peut obtenir le status de '%-.200s' (Errcode: %M)"
+ ger "Kann Status von '%-.200s' nicht ermitteln (Fehler: %M)"
+ greek "ΑδÏνατη η λήψη πληÏοφοÏιών για την κατάσταση του '%-.200s' (κωδικός λάθους: %M)"
+ hun "A(z) '%-.200s' statusza nem allapithato meg (hibakod: %M)"
+ ita "Impossibile leggere lo stato di '%-.200s' (errno: %M)"
+ jpn "'%-.200s' ã®ã‚¹ãƒ†ã‚¤ã‚¿ã‚¹ãŒå¾—られã¾ã›ã‚“. (errno: %M)"
+ kor "'%-.200s'ì˜ ìƒíƒœë¥¼ 얻지 못했습니다. (ì—러번호: %M)"
+ nor "Kan ikke lese statusen til '%-.200s' (Feilkode: %M)"
+ norwegian-ny "Kan ikkje lese statusen til '%-.200s' (Feilkode: %M)"
+ pol "Nie można otrzymać statusu '%-.200s' (Kod błędu: %M)"
+ por "Não pode obter o status de '%-.200s' (erro no. %M)"
+ rum "Nu pot sa obtin statusul lui '%-.200s' (Eroare: %M)"
+ rus "Ðевозможно получить ÑтатуÑную информацию о '%-.200s' (ошибка: %M)"
+ serbian "Ne mogu da dobijem stanje file-a '%-.200s' (errno: %M)"
+ slo "Nemôžem zistiť stav '%-.200s' (chybový kód: %M)"
+ spa "No puedo obtener el estado de '%-.200s' (Error: %M)"
+ swe "Kan inte läsa filinformationen (stat) från '%-.200s' (Felkod: %M)"
+ ukr "Ðе можу отримати ÑÑ‚Ð°Ñ‚ÑƒÑ '%-.200s' (помилка: %M)"
ER_CANT_GET_WD
- cze "Chyba p-Bři zjišťování pracovní adresář (chybový kód: %d)"
- dan "Kan ikke læse aktive folder (Fejlkode: %d)"
- nla "Kan de werkdirectory niet krijgen (Errcode: %d)"
- eng "Can't get working directory (errno: %d)"
- jps "working directory を得る事ãŒã§ãã¾ã›ã‚“ã§ã—㟠(errno: %d)",
- est "Ei suuda identifitseerida jooksvat kataloogi (veakood: %d)"
- fre "Ne peut obtenir le répertoire de travail (Errcode: %d)"
- ger "Kann Arbeitsverzeichnis nicht ermitteln (Fehler: %d)"
- greek "Ο φάκελλος εÏγασίας δεν βÏέθηκε (κωδικός λάθους: %d)"
- hun "A munkakonyvtar nem allapithato meg (hibakod: %d)"
- ita "Impossibile leggere la directory di lavoro (errno: %d)"
- jpn "working directory を得る事ãŒã§ãã¾ã›ã‚“ã§ã—㟠(errno: %d)"
- kor "수행 디렉토리를 찾지 못했습니다. (ì—러번호: %d)"
- nor "Kan ikke lese aktiv katalog(Feilkode: %d)"
- norwegian-ny "Kan ikkje lese aktiv katalog(Feilkode: %d)"
- pol "Nie można rozpoznać aktualnego katalogu (Kod błędu: %d)"
- por "Não pode obter o diretório corrente (erro no. %d)"
- rum "Nu pot sa obtin directorul current (working directory) (Eroare: %d)"
- rus "Ðевозможно определить рабочий каталог (ошибка: %d)"
- serbian "Ne mogu da dobijem trenutni direktorijum (errno: %d)"
- slo "Nemôžem zistiť pracovný adresár (chybový kód: %d)"
- spa "No puedo acceder al directorio (Error: %d)"
- swe "Kan inte inte läsa aktivt bibliotek. (Felkod: %d)"
- ukr "Ðе можу визначити робочу теку (помилка: %d)"
+ cze "Chyba p-Bři zjišťování pracovní adresář (chybový kód: %M)"
+ dan "Kan ikke læse aktive folder (Fejlkode: %M)"
+ nla "Kan de werkdirectory niet krijgen (Errcode: %M)"
+ eng "Can't get working directory (errno: %M)"
+ jps "working directory を得る事ãŒã§ãã¾ã›ã‚“ã§ã—㟠(errno: %M)",
+ est "Ei suuda identifitseerida jooksvat kataloogi (veakood: %M)"
+ fre "Ne peut obtenir le répertoire de travail (Errcode: %M)"
+ ger "Kann Arbeitsverzeichnis nicht ermitteln (Fehler: %M)"
+ greek "Ο φάκελλος εÏγασίας δεν βÏέθηκε (κωδικός λάθους: %M)"
+ hun "A munkakonyvtar nem allapithato meg (hibakod: %M)"
+ ita "Impossibile leggere la directory di lavoro (errno: %M)"
+ jpn "working directory を得る事ãŒã§ãã¾ã›ã‚“ã§ã—㟠(errno: %M)"
+ kor "수행 디렉토리를 찾지 못했습니다. (ì—러번호: %M)"
+ nor "Kan ikke lese aktiv katalog(Feilkode: %M)"
+ norwegian-ny "Kan ikkje lese aktiv katalog(Feilkode: %M)"
+ pol "Nie można rozpoznać aktualnego katalogu (Kod błędu: %M)"
+ por "Não pode obter o diretório corrente (erro no. %M)"
+ rum "Nu pot sa obtin directorul current (working directory) (Eroare: %M)"
+ rus "Ðевозможно определить рабочий каталог (ошибка: %M)"
+ serbian "Ne mogu da dobijem trenutni direktorijum (errno: %M)"
+ slo "Nemôžem zistiť pracovný adresár (chybový kód: %M)"
+ spa "No puedo acceder al directorio (Error: %M)"
+ swe "Kan inte inte läsa aktivt bibliotek. (Felkod: %M)"
+ ukr "Ðе можу визначити робочу теку (помилка: %M)"
ER_CANT_LOCK
- cze "Nemohu uzamknout soubor (chybov-Bý kód: %d)"
- dan "Kan ikke låse fil (Fejlkode: %d)"
- nla "Kan de file niet blokeren (Errcode: %d)"
- eng "Can't lock file (errno: %d)"
- jps "ファイルをロックã§ãã¾ã›ã‚“ (errno: %d)",
- est "Ei suuda lukustada faili (veakood: %d)"
- fre "Ne peut verrouiller le fichier (Errcode: %d)"
- ger "Datei kann nicht gesperrt werden (Fehler: %d)"
- greek "Το αÏχείο δεν μποÏεί να κλειδωθεί (κωδικός λάθους: %d)"
- hun "A file nem zarolhato. (hibakod: %d)"
- ita "Impossibile il locking il file (errno: %d)"
- jpn "ファイルをロックã§ãã¾ã›ã‚“ (errno: %d)"
- kor "í™”ì¼ì„ 잠그지(lock) 못했습니다. (ì—러번호: %d)"
- nor "Kan ikke låse fila (Feilkode: %d)"
- norwegian-ny "Kan ikkje låse fila (Feilkode: %d)"
- pol "Nie można zablokować pliku (Kod błędu: %d)"
- por "Não pode travar o arquivo (erro no. %d)"
- rum "Nu pot sa lock fisierul (Eroare: %d)"
- rus "Ðевозможно поÑтавить блокировку на файле (ошибка: %d)"
- serbian "Ne mogu da zakljuÄam file (errno: %d)"
- slo "Nemôžem zamknúť súbor (chybový kód: %d)"
- spa "No puedo bloquear archivo: (Error: %d)"
- swe "Kan inte låsa filen. (Felkod: %d)"
- ukr "Ðе можу заблокувати файл (помилка: %d)"
+ cze "Nemohu uzamknout soubor (chybov-Bý kód: %M)"
+ dan "Kan ikke låse fil (Fejlkode: %M)"
+ nla "Kan de file niet blokeren (Errcode: %M)"
+ eng "Can't lock file (errno: %M)"
+ jps "ファイルをロックã§ãã¾ã›ã‚“ (errno: %M)",
+ est "Ei suuda lukustada faili (veakood: %M)"
+ fre "Ne peut verrouiller le fichier (Errcode: %M)"
+ ger "Datei kann nicht gesperrt werden (Fehler: %M)"
+ greek "Το αÏχείο δεν μποÏεί να κλειδωθεί (κωδικός λάθους: %M)"
+ hun "A file nem zarolhato. (hibakod: %M)"
+ ita "Impossibile il locking il file (errno: %M)"
+ jpn "ファイルをロックã§ãã¾ã›ã‚“ (errno: %M)"
+ kor "í™”ì¼ì„ 잠그지(lock) 못했습니다. (ì—러번호: %M)"
+ nor "Kan ikke låse fila (Feilkode: %M)"
+ norwegian-ny "Kan ikkje låse fila (Feilkode: %M)"
+ pol "Nie można zablokować pliku (Kod błędu: %M)"
+ por "Não pode travar o arquivo (erro no. %M)"
+ rum "Nu pot sa lock fisierul (Eroare: %M)"
+ rus "Ðевозможно поÑтавить блокировку на файле (ошибка: %M)"
+ serbian "Ne mogu da zakljuÄam file (errno: %M)"
+ slo "Nemôžem zamknúť súbor (chybový kód: %M)"
+ spa "No puedo bloquear archivo: (Error: %M)"
+ swe "Kan inte låsa filen. (Felkod: %M)"
+ ukr "Ðе можу заблокувати файл (помилка: %M)"
ER_CANT_OPEN_FILE
- cze "Nemohu otev-Břít soubor '%-.200s' (chybový kód: %d)"
- dan "Kan ikke åbne fil: '%-.200s' (Fejlkode: %d)"
- nla "Kan de file '%-.200s' niet openen (Errcode: %d)"
- eng "Can't open file: '%-.200s' (errno: %d)"
- jps "'%-.200s' ファイルを開ã事ãŒã§ãã¾ã›ã‚“ (errno: %d)",
- est "Ei suuda avada faili '%-.200s' (veakood: %d)"
- fre "Ne peut ouvrir le fichier: '%-.200s' (Errcode: %d)"
- ger "Kann Datei '%-.200s' nicht öffnen (Fehler: %d)"
- greek "Δεν είναι δυνατό να ανοιχτεί το αÏχείο: '%-.200s' (κωδικός λάθους: %d)"
- hun "A '%-.200s' file nem nyithato meg (hibakod: %d)"
- ita "Impossibile aprire il file: '%-.200s' (errno: %d)"
- jpn "'%-.200s' ファイルを開ã事ãŒã§ãã¾ã›ã‚“ (errno: %d)"
- kor "í™”ì¼ì„ ì—´ì§€ 못했습니다.: '%-.200s' (ì—러번호: %d)"
- nor "Kan ikke åpne fila: '%-.200s' (Feilkode: %d)"
- norwegian-ny "Kan ikkje åpne fila: '%-.200s' (Feilkode: %d)"
- pol "Nie można otworzyć pliku: '%-.200s' (Kod błędu: %d)"
- por "Não pode abrir o arquivo '%-.200s' (erro no. %d)"
- rum "Nu pot sa deschid fisierul: '%-.200s' (Eroare: %d)"
- rus "Ðевозможно открыть файл: '%-.200s' (ошибка: %d)"
- serbian "Ne mogu da otvorim file: '%-.200s' (errno: %d)"
- slo "Nemôžem otvoriť súbor: '%-.200s' (chybový kód: %d)"
- spa "No puedo abrir archivo: '%-.200s' (Error: %d)"
- swe "Kan inte använda '%-.200s' (Felkod: %d)"
- ukr "Ðе можу відкрити файл: '%-.200s' (помилка: %d)"
+ cze "Nemohu otev-Břít soubor '%-.200s' (chybový kód: %M)"
+ dan "Kan ikke åbne fil: '%-.200s' (Fejlkode: %M)"
+ nla "Kan de file '%-.200s' niet openen (Errcode: %M)"
+ eng "Can't open file: '%-.200s' (errno: %M)"
+ jps "'%-.200s' ファイルを開ã事ãŒã§ãã¾ã›ã‚“ (errno: %M)",
+ est "Ei suuda avada faili '%-.200s' (veakood: %M)"
+ fre "Ne peut ouvrir le fichier: '%-.200s' (Errcode: %M)"
+ ger "Kann Datei '%-.200s' nicht öffnen (Fehler: %M)"
+ greek "Δεν είναι δυνατό να ανοιχτεί το αÏχείο: '%-.200s' (κωδικός λάθους: %M)"
+ hun "A '%-.200s' file nem nyithato meg (hibakod: %M)"
+ ita "Impossibile aprire il file: '%-.200s' (errno: %M)"
+ jpn "'%-.200s' ファイルを開ã事ãŒã§ãã¾ã›ã‚“ (errno: %M)"
+ kor "í™”ì¼ì„ ì—´ì§€ 못했습니다.: '%-.200s' (ì—러번호: %M)"
+ nor "Kan ikke åpne fila: '%-.200s' (Feilkode: %M)"
+ norwegian-ny "Kan ikkje åpne fila: '%-.200s' (Feilkode: %M)"
+ pol "Nie można otworzyć pliku: '%-.200s' (Kod błędu: %M)"
+ por "Não pode abrir o arquivo '%-.200s' (erro no. %M)"
+ rum "Nu pot sa deschid fisierul: '%-.200s' (Eroare: %M)"
+ rus "Ðевозможно открыть файл: '%-.200s' (ошибка: %M)"
+ serbian "Ne mogu da otvorim file: '%-.200s' (errno: %M)"
+ slo "Nemôžem otvoriť súbor: '%-.200s' (chybový kód: %M)"
+ spa "No puedo abrir archivo: '%-.200s' (Error: %M)"
+ swe "Kan inte använda '%-.200s' (Felkod: %M)"
+ ukr "Ðе можу відкрити файл: '%-.200s' (помилка: %M)"
ER_FILE_NOT_FOUND
- cze "Nemohu naj-Bít soubor '%-.200s' (chybový kód: %d)"
- dan "Kan ikke finde fila: '%-.200s' (Fejlkode: %d)"
- nla "Kan de file: '%-.200s' niet vinden (Errcode: %d)"
- eng "Can't find file: '%-.200s' (errno: %d)"
- jps "'%-.200s' ファイルを見付ã‘る事ãŒã§ãã¾ã›ã‚“.(errno: %d)",
- est "Ei suuda leida faili '%-.200s' (veakood: %d)"
- fre "Ne peut trouver le fichier: '%-.200s' (Errcode: %d)"
- ger "Kann Datei '%-.200s' nicht finden (Fehler: %d)"
- greek "Δεν βÏέθηκε το αÏχείο: '%-.200s' (κωδικός λάθους: %d)"
- hun "A(z) '%-.200s' file nem talalhato (hibakod: %d)"
- ita "Impossibile trovare il file: '%-.200s' (errno: %d)"
- jpn "'%-.200s' ファイルを見付ã‘る事ãŒã§ãã¾ã›ã‚“.(errno: %d)"
- kor "í™”ì¼ì„ 찾지 못했습니다.: '%-.200s' (ì—러번호: %d)"
- nor "Kan ikke finne fila: '%-.200s' (Feilkode: %d)"
- norwegian-ny "Kan ikkje finne fila: '%-.200s' (Feilkode: %d)"
- pol "Nie można znaleĽć pliku: '%-.200s' (Kod błędu: %d)"
- por "Não pode encontrar o arquivo '%-.200s' (erro no. %d)"
- rum "Nu pot sa gasesc fisierul: '%-.200s' (Eroare: %d)"
- rus "Ðевозможно найти файл: '%-.200s' (ошибка: %d)"
- serbian "Ne mogu da pronađem file: '%-.200s' (errno: %d)"
- slo "Nemôžem nájsť súbor: '%-.200s' (chybový kód: %d)"
- spa "No puedo encontrar archivo: '%-.200s' (Error: %d)"
- swe "Hittar inte filen '%-.200s' (Felkod: %d)"
- ukr "Ðе можу знайти файл: '%-.200s' (помилка: %d)"
+ cze "Nemohu naj-Bít soubor '%-.200s' (chybový kód: %M)"
+ dan "Kan ikke finde fila: '%-.200s' (Fejlkode: %M)"
+ nla "Kan de file: '%-.200s' niet vinden (Errcode: %M)"
+ eng "Can't find file: '%-.200s' (errno: %M)"
+ jps "'%-.200s' ファイルを見付ã‘る事ãŒã§ãã¾ã›ã‚“.(errno: %M)",
+ est "Ei suuda leida faili '%-.200s' (veakood: %M)"
+ fre "Ne peut trouver le fichier: '%-.200s' (Errcode: %M)"
+ ger "Kann Datei '%-.200s' nicht finden (Fehler: %M)"
+ greek "Δεν βÏέθηκε το αÏχείο: '%-.200s' (κωδικός λάθους: %M)"
+ hun "A(z) '%-.200s' file nem talalhato (hibakod: %M)"
+ ita "Impossibile trovare il file: '%-.200s' (errno: %M)"
+ jpn "'%-.200s' ファイルを見付ã‘る事ãŒã§ãã¾ã›ã‚“.(errno: %M)"
+ kor "í™”ì¼ì„ 찾지 못했습니다.: '%-.200s' (ì—러번호: %M)"
+ nor "Kan ikke finne fila: '%-.200s' (Feilkode: %M)"
+ norwegian-ny "Kan ikkje finne fila: '%-.200s' (Feilkode: %M)"
+ pol "Nie można znaleĽć pliku: '%-.200s' (Kod błędu: %M)"
+ por "Não pode encontrar o arquivo '%-.200s' (erro no. %M)"
+ rum "Nu pot sa gasesc fisierul: '%-.200s' (Eroare: %M)"
+ rus "Ðевозможно найти файл: '%-.200s' (ошибка: %M)"
+ serbian "Ne mogu da pronađem file: '%-.200s' (errno: %M)"
+ slo "Nemôžem nájsť súbor: '%-.200s' (chybový kód: %M)"
+ spa "No puedo encontrar archivo: '%-.200s' (Error: %M)"
+ swe "Hittar inte filen '%-.200s' (Felkod: %M)"
+ ukr "Ðе можу знайти файл: '%-.200s' (помилка: %M)"
ER_CANT_READ_DIR
- cze "Nemohu -BÄíst adresář '%-.192s' (chybový kód: %d)"
- dan "Kan ikke læse folder '%-.192s' (Fejlkode: %d)"
- nla "Kan de directory niet lezen van '%-.192s' (Errcode: %d)"
- eng "Can't read dir of '%-.192s' (errno: %d)"
- jps "'%-.192s' ディレクトリãŒèª­ã‚ã¾ã›ã‚“.(errno: %d)",
- est "Ei suuda lugeda kataloogi '%-.192s' (veakood: %d)"
- fre "Ne peut lire le répertoire de '%-.192s' (Errcode: %d)"
- ger "Verzeichnis von '%-.192s' nicht lesbar (Fehler: %d)"
- greek "Δεν είναι δυνατό να διαβαστεί ο φάκελλος του '%-.192s' (κωδικός λάθους: %d)"
- hun "A(z) '%-.192s' konyvtar nem olvashato. (hibakod: %d)"
- ita "Impossibile leggere la directory di '%-.192s' (errno: %d)"
- jpn "'%-.192s' ディレクトリãŒèª­ã‚ã¾ã›ã‚“.(errno: %d)"
- kor "'%-.192s'디렉토리를 ì½ì§€ 못했습니다. (ì—러번호: %d)"
- nor "Kan ikke lese katalogen '%-.192s' (Feilkode: %d)"
- norwegian-ny "Kan ikkje lese katalogen '%-.192s' (Feilkode: %d)"
- pol "Nie można odczytać katalogu '%-.192s' (Kod błędu: %d)"
- por "Não pode ler o diretório de '%-.192s' (erro no. %d)"
- rum "Nu pot sa citesc directorul '%-.192s' (Eroare: %d)"
- rus "Ðевозможно прочитать каталог '%-.192s' (ошибка: %d)"
- serbian "Ne mogu da proÄitam direktorijum '%-.192s' (errno: %d)"
- slo "Nemôžem ÄítaÅ¥ adresár '%-.192s' (chybový kód: %d)"
- spa "No puedo leer el directorio de '%-.192s' (Error: %d)"
- swe "Kan inte läsa från bibliotek '%-.192s' (Felkod: %d)"
- ukr "Ðе можу прочитати теку '%-.192s' (помилка: %d)"
+ cze "Nemohu -BÄíst adresář '%-.192s' (chybový kód: %M)"
+ dan "Kan ikke læse folder '%-.192s' (Fejlkode: %M)"
+ nla "Kan de directory niet lezen van '%-.192s' (Errcode: %M)"
+ eng "Can't read dir of '%-.192s' (errno: %M)"
+ jps "'%-.192s' ディレクトリãŒèª­ã‚ã¾ã›ã‚“.(errno: %M)",
+ est "Ei suuda lugeda kataloogi '%-.192s' (veakood: %M)"
+ fre "Ne peut lire le répertoire de '%-.192s' (Errcode: %M)"
+ ger "Verzeichnis von '%-.192s' nicht lesbar (Fehler: %M)"
+ greek "Δεν είναι δυνατό να διαβαστεί ο φάκελλος του '%-.192s' (κωδικός λάθους: %M)"
+ hun "A(z) '%-.192s' konyvtar nem olvashato. (hibakod: %M)"
+ ita "Impossibile leggere la directory di '%-.192s' (errno: %M)"
+ jpn "'%-.192s' ディレクトリãŒèª­ã‚ã¾ã›ã‚“.(errno: %M)"
+ kor "'%-.192s'디렉토리를 ì½ì§€ 못했습니다. (ì—러번호: %M)"
+ nor "Kan ikke lese katalogen '%-.192s' (Feilkode: %M)"
+ norwegian-ny "Kan ikkje lese katalogen '%-.192s' (Feilkode: %M)"
+ pol "Nie można odczytać katalogu '%-.192s' (Kod błędu: %M)"
+ por "Não pode ler o diretório de '%-.192s' (erro no. %M)"
+ rum "Nu pot sa citesc directorul '%-.192s' (Eroare: %M)"
+ rus "Ðевозможно прочитать каталог '%-.192s' (ошибка: %M)"
+ serbian "Ne mogu da proÄitam direktorijum '%-.192s' (errno: %M)"
+ slo "Nemôžem ÄítaÅ¥ adresár '%-.192s' (chybový kód: %M)"
+ spa "No puedo leer el directorio de '%-.192s' (Error: %M)"
+ swe "Kan inte läsa från bibliotek '%-.192s' (Felkod: %M)"
+ ukr "Ðе можу прочитати теку '%-.192s' (помилка: %M)"
ER_CANT_SET_WD
- cze "Nemohu zm-Běnit adresář na '%-.192s' (chybový kód: %d)"
- dan "Kan ikke skifte folder til '%-.192s' (Fejlkode: %d)"
- nla "Kan de directory niet veranderen naar '%-.192s' (Errcode: %d)"
- eng "Can't change dir to '%-.192s' (errno: %d)"
- jps "'%-.192s' ディレクトリ㫠chdir ã§ãã¾ã›ã‚“.(errno: %d)",
- est "Ei suuda siseneda kataloogi '%-.192s' (veakood: %d)"
- fre "Ne peut changer le répertoire pour '%-.192s' (Errcode: %d)"
- ger "Kann nicht in das Verzeichnis '%-.192s' wechseln (Fehler: %d)"
- greek "ΑδÏνατη η αλλαγή του Ï„Ïέχοντος καταλόγου σε '%-.192s' (κωδικός λάθους: %d)"
- hun "Konyvtarvaltas nem lehetseges a(z) '%-.192s'-ba. (hibakod: %d)"
- ita "Impossibile cambiare la directory in '%-.192s' (errno: %d)"
- jpn "'%-.192s' ディレクトリ㫠chdir ã§ãã¾ã›ã‚“.(errno: %d)"
- kor "'%-.192s'디렉토리로 ì´ë™í•  수 없었습니다. (ì—러번호: %d)"
- nor "Kan ikke skifte katalog til '%-.192s' (Feilkode: %d)"
- norwegian-ny "Kan ikkje skifte katalog til '%-.192s' (Feilkode: %d)"
- pol "Nie można zmienić katalogu na '%-.192s' (Kod błędu: %d)"
- por "Não pode mudar para o diretório '%-.192s' (erro no. %d)"
- rum "Nu pot sa schimb directorul '%-.192s' (Eroare: %d)"
- rus "Ðевозможно перейти в каталог '%-.192s' (ошибка: %d)"
- serbian "Ne mogu da promenim direktorijum na '%-.192s' (errno: %d)"
- slo "Nemôžem vojsť do adresára '%-.192s' (chybový kód: %d)"
- spa "No puedo cambiar al directorio de '%-.192s' (Error: %d)"
- swe "Kan inte byta till '%-.192s' (Felkod: %d)"
- ukr "Ðе можу перейти у теку '%-.192s' (помилка: %d)"
+ cze "Nemohu zm-Běnit adresář na '%-.192s' (chybový kód: %M)"
+ dan "Kan ikke skifte folder til '%-.192s' (Fejlkode: %M)"
+ nla "Kan de directory niet veranderen naar '%-.192s' (Errcode: %M)"
+ eng "Can't change dir to '%-.192s' (errno: %M)"
+ jps "'%-.192s' ディレクトリ㫠chdir ã§ãã¾ã›ã‚“.(errno: %M)",
+ est "Ei suuda siseneda kataloogi '%-.192s' (veakood: %M)"
+ fre "Ne peut changer le répertoire pour '%-.192s' (Errcode: %M)"
+ ger "Kann nicht in das Verzeichnis '%-.192s' wechseln (Fehler: %M)"
+ greek "ΑδÏνατη η αλλαγή του Ï„Ïέχοντος καταλόγου σε '%-.192s' (κωδικός λάθους: %M)"
+ hun "Konyvtarvaltas nem lehetseges a(z) '%-.192s'-ba. (hibakod: %M)"
+ ita "Impossibile cambiare la directory in '%-.192s' (errno: %M)"
+ jpn "'%-.192s' ディレクトリ㫠chdir ã§ãã¾ã›ã‚“.(errno: %M)"
+ kor "'%-.192s'디렉토리로 ì´ë™í•  수 없었습니다. (ì—러번호: %M)"
+ nor "Kan ikke skifte katalog til '%-.192s' (Feilkode: %M)"
+ norwegian-ny "Kan ikkje skifte katalog til '%-.192s' (Feilkode: %M)"
+ pol "Nie można zmienić katalogu na '%-.192s' (Kod błędu: %M)"
+ por "Não pode mudar para o diretório '%-.192s' (erro no. %M)"
+ rum "Nu pot sa schimb directorul '%-.192s' (Eroare: %M)"
+ rus "Ðевозможно перейти в каталог '%-.192s' (ошибка: %M)"
+ serbian "Ne mogu da promenim direktorijum na '%-.192s' (errno: %M)"
+ slo "Nemôžem vojsť do adresára '%-.192s' (chybový kód: %M)"
+ spa "No puedo cambiar al directorio de '%-.192s' (Error: %M)"
+ swe "Kan inte byta till '%-.192s' (Felkod: %M)"
+ ukr "Ðе можу перейти у теку '%-.192s' (помилка: %M)"
ER_CHECKREAD
cze "Z-Báznam byl zmÄ›nÄ›n od posledního Ätení v tabulce '%-.192s'"
dan "Posten er ændret siden sidste læsning '%-.192s'"
@@ -523,103 +523,103 @@ ER_DUP_KEY 23000
swe "Kan inte skriva, dubbel söknyckel i register '%-.192s'"
ukr "Ðе можу запиÑати, дублюючийÑÑ ÐºÐ»ÑŽÑ‡ в таблиці '%-.192s'"
ER_ERROR_ON_CLOSE
- cze "Chyba p-Bři zavírání '%-.192s' (chybový kód: %d)"
- dan "Fejl ved lukning af '%-.192s' (Fejlkode: %d)"
- nla "Fout bij het sluiten van '%-.192s' (Errcode: %d)"
- eng "Error on close of '%-.192s' (errno: %d)"
- est "Viga faili '%-.192s' sulgemisel (veakood: %d)"
- fre "Erreur a la fermeture de '%-.192s' (Errcode: %d)"
- ger "Fehler beim Schließen von '%-.192s' (Fehler: %d)"
- greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κλείνοντας το '%-.192s' (κωδικός λάθους: %d)"
- hun "Hiba a(z) '%-.192s' zarasakor. (hibakod: %d)"
- ita "Errore durante la chiusura di '%-.192s' (errno: %d)"
- kor "'%-.192s'닫는 중 ì—러 (ì—러번호: %d)"
- nor "Feil ved lukking av '%-.192s' (Feilkode: %d)"
- norwegian-ny "Feil ved lukking av '%-.192s' (Feilkode: %d)"
- pol "Bł?d podczas zamykania '%-.192s' (Kod błędu: %d)"
- por "Erro ao fechar '%-.192s' (erro no. %d)"
- rum "Eroare inchizind '%-.192s' (errno: %d)"
- rus "Ошибка при закрытии '%-.192s' (ошибка: %d)"
- serbian "Greška pri zatvaranju '%-.192s' (errno: %d)"
- slo "Chyba pri zatváraní '%-.192s' (chybový kód: %d)"
- spa "Error en el cierre de '%-.192s' (Error: %d)"
- swe "Fick fel vid stängning av '%-.192s' (Felkod: %d)"
- ukr "Ðе можу закрити '%-.192s' (помилка: %d)"
+ cze "Chyba p-Bři zavírání '%-.192s' (chybový kód: %M)"
+ dan "Fejl ved lukning af '%-.192s' (Fejlkode: %M)"
+ nla "Fout bij het sluiten van '%-.192s' (Errcode: %M)"
+ eng "Error on close of '%-.192s' (errno: %M)"
+ est "Viga faili '%-.192s' sulgemisel (veakood: %M)"
+ fre "Erreur a la fermeture de '%-.192s' (Errcode: %M)"
+ ger "Fehler beim Schließen von '%-.192s' (Fehler: %M)"
+ greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κλείνοντας το '%-.192s' (κωδικός λάθους: %M)"
+ hun "Hiba a(z) '%-.192s' zarasakor. (hibakod: %M)"
+ ita "Errore durante la chiusura di '%-.192s' (errno: %M)"
+ kor "'%-.192s'닫는 중 ì—러 (ì—러번호: %M)"
+ nor "Feil ved lukking av '%-.192s' (Feilkode: %M)"
+ norwegian-ny "Feil ved lukking av '%-.192s' (Feilkode: %M)"
+ pol "Bł?d podczas zamykania '%-.192s' (Kod błędu: %M)"
+ por "Erro ao fechar '%-.192s' (erro no. %M)"
+ rum "Eroare inchizind '%-.192s' (errno: %M)"
+ rus "Ошибка при закрытии '%-.192s' (ошибка: %M)"
+ serbian "Greška pri zatvaranju '%-.192s' (errno: %M)"
+ slo "Chyba pri zatváraní '%-.192s' (chybový kód: %M)"
+ spa "Error en el cierre de '%-.192s' (Error: %M)"
+ swe "Fick fel vid stängning av '%-.192s' (Felkod: %M)"
+ ukr "Ðе можу закрити '%-.192s' (помилка: %M)"
ER_ERROR_ON_READ
- cze "Chyba p-BÅ™i Ätení souboru '%-.200s' (chybový kód: %d)"
- dan "Fejl ved læsning af '%-.200s' (Fejlkode: %d)"
- nla "Fout bij het lezen van file '%-.200s' (Errcode: %d)"
- eng "Error reading file '%-.200s' (errno: %d)"
- jps "'%-.200s' ファイルã®èª­ã¿è¾¼ã¿ã‚¨ãƒ©ãƒ¼ (errno: %d)",
- est "Viga faili '%-.200s' lugemisel (veakood: %d)"
- fre "Erreur en lecture du fichier '%-.200s' (Errcode: %d)"
- ger "Fehler beim Lesen der Datei '%-.200s' (Fehler: %d)"
- greek "ΠÏόβλημα κατά την ανάγνωση του αÏχείου '%-.200s' (κωδικός λάθους: %d)"
- hun "Hiba a '%-.200s'file olvasasakor. (hibakod: %d)"
- ita "Errore durante la lettura del file '%-.200s' (errno: %d)"
- jpn "'%-.200s' ファイルã®èª­ã¿è¾¼ã¿ã‚¨ãƒ©ãƒ¼ (errno: %d)"
- kor "'%-.200s'í™”ì¼ ì½ê¸° ì—러 (ì—러번호: %d)"
- nor "Feil ved lesing av '%-.200s' (Feilkode: %d)"
- norwegian-ny "Feil ved lesing av '%-.200s' (Feilkode: %d)"
- pol "Bł?d podczas odczytu pliku '%-.200s' (Kod błędu: %d)"
- por "Erro ao ler arquivo '%-.200s' (erro no. %d)"
- rum "Eroare citind fisierul '%-.200s' (errno: %d)"
- rus "Ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° '%-.200s' (ошибка: %d)"
- serbian "GreÅ¡ka pri Äitanju file-a '%-.200s' (errno: %d)"
- slo "Chyba pri Äítaní súboru '%-.200s' (chybový kód: %d)"
- spa "Error leyendo el fichero '%-.200s' (Error: %d)"
- swe "Fick fel vid läsning av '%-.200s' (Felkod %d)"
- ukr "Ðе можу прочитати файл '%-.200s' (помилка: %d)"
+ cze "Chyba p-BÅ™i Ätení souboru '%-.200s' (chybový kód: %M)"
+ dan "Fejl ved læsning af '%-.200s' (Fejlkode: %M)"
+ nla "Fout bij het lezen van file '%-.200s' (Errcode: %M)"
+ eng "Error reading file '%-.200s' (errno: %M)"
+ jps "'%-.200s' ファイルã®èª­ã¿è¾¼ã¿ã‚¨ãƒ©ãƒ¼ (errno: %M)",
+ est "Viga faili '%-.200s' lugemisel (veakood: %M)"
+ fre "Erreur en lecture du fichier '%-.200s' (Errcode: %M)"
+ ger "Fehler beim Lesen der Datei '%-.200s' (Fehler: %M)"
+ greek "ΠÏόβλημα κατά την ανάγνωση του αÏχείου '%-.200s' (κωδικός λάθους: %M)"
+ hun "Hiba a '%-.200s'file olvasasakor. (hibakod: %M)"
+ ita "Errore durante la lettura del file '%-.200s' (errno: %M)"
+ jpn "'%-.200s' ファイルã®èª­ã¿è¾¼ã¿ã‚¨ãƒ©ãƒ¼ (errno: %M)"
+ kor "'%-.200s'í™”ì¼ ì½ê¸° ì—러 (ì—러번호: %M)"
+ nor "Feil ved lesing av '%-.200s' (Feilkode: %M)"
+ norwegian-ny "Feil ved lesing av '%-.200s' (Feilkode: %M)"
+ pol "Bł?d podczas odczytu pliku '%-.200s' (Kod błędu: %M)"
+ por "Erro ao ler arquivo '%-.200s' (erro no. %M)"
+ rum "Eroare citind fisierul '%-.200s' (errno: %M)"
+ rus "Ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° '%-.200s' (ошибка: %M)"
+ serbian "GreÅ¡ka pri Äitanju file-a '%-.200s' (errno: %M)"
+ slo "Chyba pri Äítaní súboru '%-.200s' (chybový kód: %M)"
+ spa "Error leyendo el fichero '%-.200s' (Error: %M)"
+ swe "Fick fel vid läsning av '%-.200s' (Felkod %M)"
+ ukr "Ðе можу прочитати файл '%-.200s' (помилка: %M)"
ER_ERROR_ON_RENAME
- cze "Chyba p-Bři přejmenování '%-.210s' na '%-.210s' (chybový kód: %d)"
- dan "Fejl ved omdøbning af '%-.210s' til '%-.210s' (Fejlkode: %d)"
- nla "Fout bij het hernoemen van '%-.210s' naar '%-.210s' (Errcode: %d)"
- eng "Error on rename of '%-.210s' to '%-.210s' (errno: %d)"
- jps "'%-.210s' ã‚’ '%-.210s' ã« rename ã§ãã¾ã›ã‚“ (errno: %d)",
- est "Viga faili '%-.210s' ümbernimetamisel '%-.210s'-ks (veakood: %d)"
- fre "Erreur en renommant '%-.210s' en '%-.210s' (Errcode: %d)"
- ger "Fehler beim Umbenennen von '%-.210s' in '%-.210s' (Fehler: %d)"
- greek "ΠÏόβλημα κατά την μετονομασία του αÏχείου '%-.210s' to '%-.210s' (κωδικός λάθους: %d)"
- hun "Hiba a '%-.210s' file atnevezesekor '%-.210s'. (hibakod: %d)"
- ita "Errore durante la rinominazione da '%-.210s' a '%-.210s' (errno: %d)"
- jpn "'%-.210s' ã‚’ '%-.210s' ã« rename ã§ãã¾ã›ã‚“ (errno: %d)"
- kor "'%-.210s'를 '%-.210s'로 ì´ë¦„ 변경중 ì—러 (ì—러번호: %d)"
- nor "Feil ved omdøping av '%-.210s' til '%-.210s' (Feilkode: %d)"
- norwegian-ny "Feil ved omdøyping av '%-.210s' til '%-.210s' (Feilkode: %d)"
- pol "Bł?d podczas zmieniania nazwy '%-.210s' na '%-.210s' (Kod błędu: %d)"
- por "Erro ao renomear '%-.210s' para '%-.210s' (erro no. %d)"
- rum "Eroare incercind sa renumesc '%-.210s' in '%-.210s' (errno: %d)"
- rus "Ошибка при переименовании '%-.210s' в '%-.210s' (ошибка: %d)"
- serbian "Greška pri promeni imena '%-.210s' na '%-.210s' (errno: %d)"
- slo "Chyba pri premenovávaní '%-.210s' na '%-.210s' (chybový kód: %d)"
- spa "Error en el renombrado de '%-.210s' a '%-.210s' (Error: %d)"
- swe "Kan inte byta namn från '%-.210s' till '%-.210s' (Felkod: %d)"
- ukr "Ðе можу перейменувати '%-.210s' у '%-.210s' (помилка: %d)"
+ cze "Chyba p-Bři přejmenování '%-.210s' na '%-.210s' (chybový kód: %M)"
+ dan "Fejl ved omdøbning af '%-.210s' til '%-.210s' (Fejlkode: %M)"
+ nla "Fout bij het hernoemen van '%-.210s' naar '%-.210s' (Errcode: %M)"
+ eng "Error on rename of '%-.210s' to '%-.210s' (errno: %M)"
+ jps "'%-.210s' ã‚’ '%-.210s' ã« rename ã§ãã¾ã›ã‚“ (errno: %M)",
+ est "Viga faili '%-.210s' ümbernimetamisel '%-.210s'-ks (veakood: %M)"
+ fre "Erreur en renommant '%-.210s' en '%-.210s' (Errcode: %M)"
+ ger "Fehler beim Umbenennen von '%-.210s' in '%-.210s' (Fehler: %M)"
+ greek "ΠÏόβλημα κατά την μετονομασία του αÏχείου '%-.210s' to '%-.210s' (κωδικός λάθους: %M)"
+ hun "Hiba a '%-.210s' file atnevezesekor '%-.210s'. (hibakod: %M)"
+ ita "Errore durante la rinominazione da '%-.210s' a '%-.210s' (errno: %M)"
+ jpn "'%-.210s' ã‚’ '%-.210s' ã« rename ã§ãã¾ã›ã‚“ (errno: %M)"
+ kor "'%-.210s'를 '%-.210s'로 ì´ë¦„ 변경중 ì—러 (ì—러번호: %M)"
+ nor "Feil ved omdøping av '%-.210s' til '%-.210s' (Feilkode: %M)"
+ norwegian-ny "Feil ved omdøyping av '%-.210s' til '%-.210s' (Feilkode: %M)"
+ pol "Bł?d podczas zmieniania nazwy '%-.210s' na '%-.210s' (Kod błędu: %M)"
+ por "Erro ao renomear '%-.210s' para '%-.210s' (erro no. %M)"
+ rum "Eroare incercind sa renumesc '%-.210s' in '%-.210s' (errno: %M)"
+ rus "Ошибка при переименовании '%-.210s' в '%-.210s' (ошибка: %M)"
+ serbian "Greška pri promeni imena '%-.210s' na '%-.210s' (errno: %M)"
+ slo "Chyba pri premenovávaní '%-.210s' na '%-.210s' (chybový kód: %M)"
+ spa "Error en el renombrado de '%-.210s' a '%-.210s' (Error: %M)"
+ swe "Kan inte byta namn från '%-.210s' till '%-.210s' (Felkod: %M)"
+ ukr "Ðе можу перейменувати '%-.210s' у '%-.210s' (помилка: %M)"
ER_ERROR_ON_WRITE
- cze "Chyba p-Bři zápisu do souboru '%-.200s' (chybový kód: %d)"
- dan "Fejl ved skriving av filen '%-.200s' (Fejlkode: %d)"
- nla "Fout bij het wegschrijven van file '%-.200s' (Errcode: %d)"
- eng "Error writing file '%-.200s' (errno: %d)"
- jps "'%-.200s' ファイルを書ã事ãŒã§ãã¾ã›ã‚“ (errno: %d)",
- est "Viga faili '%-.200s' kirjutamisel (veakood: %d)"
- fre "Erreur d'écriture du fichier '%-.200s' (Errcode: %d)"
- ger "Fehler beim Speichern der Datei '%-.200s' (Fehler: %d)"
- greek "ΠÏόβλημα κατά την αποθήκευση του αÏχείου '%-.200s' (κωδικός λάθους: %d)"
- hun "Hiba a '%-.200s' file irasakor. (hibakod: %d)"
- ita "Errore durante la scrittura del file '%-.200s' (errno: %d)"
- jpn "'%-.200s' ファイルを書ã事ãŒã§ãã¾ã›ã‚“ (errno: %d)"
- kor "'%-.200s'í™”ì¼ ê¸°ë¡ ì¤‘ ì—러 (ì—러번호: %d)"
- nor "Feil ved skriving av fila '%-.200s' (Feilkode: %d)"
- norwegian-ny "Feil ved skriving av fila '%-.200s' (Feilkode: %d)"
- pol "Bł?d podczas zapisywania pliku '%-.200s' (Kod błędu: %d)"
- por "Erro ao gravar arquivo '%-.200s' (erro no. %d)"
- rum "Eroare scriind fisierul '%-.200s' (errno: %d)"
- rus "Ошибка запиÑи в файл '%-.200s' (ошибка: %d)"
- serbian "Greška pri upisu '%-.200s' (errno: %d)"
- slo "Chyba pri zápise do súboru '%-.200s' (chybový kód: %d)"
- spa "Error escribiendo el archivo '%-.200s' (Error: %d)"
- swe "Fick fel vid skrivning till '%-.200s' (Felkod %d)"
- ukr "Ðе можу запиÑати файл '%-.200s' (помилка: %d)"
+ cze "Chyba p-Bři zápisu do souboru '%-.200s' (chybový kód: %M)"
+ dan "Fejl ved skriving av filen '%-.200s' (Fejlkode: %M)"
+ nla "Fout bij het wegschrijven van file '%-.200s' (Errcode: %M)"
+ eng "Error writing file '%-.200s' (errno: %M)"
+ jps "'%-.200s' ファイルを書ã事ãŒã§ãã¾ã›ã‚“ (errno: %M)",
+ est "Viga faili '%-.200s' kirjutamisel (veakood: %M)"
+ fre "Erreur d'écriture du fichier '%-.200s' (Errcode: %M)"
+ ger "Fehler beim Speichern der Datei '%-.200s' (Fehler: %M)"
+ greek "ΠÏόβλημα κατά την αποθήκευση του αÏχείου '%-.200s' (κωδικός λάθους: %M)"
+ hun "Hiba a '%-.200s' file irasakor. (hibakod: %M)"
+ ita "Errore durante la scrittura del file '%-.200s' (errno: %M)"
+ jpn "'%-.200s' ファイルを書ã事ãŒã§ãã¾ã›ã‚“ (errno: %M)"
+ kor "'%-.200s'í™”ì¼ ê¸°ë¡ ì¤‘ ì—러 (ì—러번호: %M)"
+ nor "Feil ved skriving av fila '%-.200s' (Feilkode: %M)"
+ norwegian-ny "Feil ved skriving av fila '%-.200s' (Feilkode: %M)"
+ pol "Bł?d podczas zapisywania pliku '%-.200s' (Kod błędu: %M)"
+ por "Erro ao gravar arquivo '%-.200s' (erro no. %M)"
+ rum "Eroare scriind fisierul '%-.200s' (errno: %M)"
+ rus "Ошибка запиÑи в файл '%-.200s' (ошибка: %M)"
+ serbian "Greška pri upisu '%-.200s' (errno: %M)"
+ slo "Chyba pri zápise do súboru '%-.200s' (chybový kód: %M)"
+ spa "Error escribiendo el archivo '%-.200s' (Error: %M)"
+ swe "Fick fel vid skrivning till '%-.200s' (Felkod %M)"
+ ukr "Ðе можу запиÑати файл '%-.200s' (помилка: %M)"
ER_FILE_USED
cze "'%-.192s' je zam-BÄen proti zmÄ›nám"
dan "'%-.192s' er låst mod opdateringer"
@@ -696,29 +696,29 @@ ER_FORM_NOT_FOUND
swe "Formulär '%-.192s' finns inte i '%-.192s'"
ukr "ВиглÑд '%-.192s' не Ñ–Ñнує Ð´Ð»Ñ '%-.192s'"
ER_GET_ERRNO
- cze "Obsluha tabulky vr-Bátila chybu %d"
- dan "Modtog fejl %d fra tabel håndteringen"
- nla "Fout %d van tabel handler"
- eng "Got error %d from storage engine"
- est "Tabeli handler tagastas vea %d"
- fre "Reçu l'erreur %d du handler de la table"
- ger "Fehler %d (Speicher-Engine)"
- greek "Ελήφθη μήνυμα λάθους %d από τον χειÏιστή πίνακα (table handler)"
- hun "%d hibajelzes a tablakezelotol"
- ita "Rilevato l'errore %d dal gestore delle tabelle"
- jpn "Got error %d from table handler"
- kor "í…Œì´ë¸” handlerì—서 %d ì—러가 ë°œìƒ í•˜ì˜€ìŠµë‹ˆë‹¤."
- nor "Mottok feil %d fra tabell håndterer"
- norwegian-ny "Mottok feil %d fra tabell handterar"
- pol "Otrzymano bł?d %d z obsługi tabeli"
- por "Obteve erro %d no manipulador de tabelas"
- rum "Eroarea %d obtinuta din handlerul tabelei"
- rus "Получена ошибка %d от обработчика таблиц"
- serbian "Handler tabela je vratio grešku %d"
- slo "Obsluha tabuľky vrátila chybu %d"
- spa "Error %d desde el manejador de la tabla"
- swe "Fick felkod %d från databashanteraren"
- ukr "Отримано помилку %d від деÑкриптора таблиці"
+ cze "Obsluha tabulky vr-Bátila chybu %M"
+ dan "Modtog fejl %M fra tabel håndteringen"
+ nla "Fout %M van tabel handler"
+ eng "Got error %M from storage engine"
+ est "Tabeli handler tagastas vea %M"
+ fre "Reçu l'erreur %M du handler de la table"
+ ger "Fehler %M (Speicher-Engine)"
+ greek "Ελήφθη μήνυμα λάθους %M από τον χειÏιστή πίνακα (table handler)"
+ hun "%M hibajelzes a tablakezelotol"
+ ita "Rilevato l'errore %M dal gestore delle tabelle"
+ jpn "Got error %M from table handler"
+ kor "í…Œì´ë¸” handlerì—서 %M ì—러가 ë°œìƒ í•˜ì˜€ìŠµë‹ˆë‹¤."
+ nor "Mottok feil %M fra tabell håndterer"
+ norwegian-ny "Mottok feil %M fra tabell handterar"
+ pol "Otrzymano bł?d %M z obsługi tabeli"
+ por "Obteve erro %M no manipulador de tabelas"
+ rum "Eroarea %M obtinuta din handlerul tabelei"
+ rus "Получена ошибка %M от обработчика таблиц"
+ serbian "Handler tabela je vratio grešku %M"
+ slo "Obsluha tabuľky vrátila chybu %M"
+ spa "Error %M desde el manejador de la tabla"
+ swe "Fick felkod %M från databashanteraren"
+ ukr "Отримано помилку %M від деÑкриптора таблиці"
ER_ILLEGAL_HA
cze "Obsluha tabulky '%-.192s' nem-Bá tento parametr"
dan "Denne mulighed eksisterer ikke for tabeltypen '%-.192s'"
@@ -919,30 +919,30 @@ ER_OUT_OF_SORTMEMORY HY001 S1001
swe "Sorteringsbufferten räcker inte till. Kontrollera startparametrarna"
ukr "Брак пам'Ñті Ð´Ð»Ñ ÑортуваннÑ. Треба збільшити розмір буфера ÑÐ¾Ñ€Ñ‚ÑƒÐ²Ð°Ð½Ð½Ñ Ñƒ Ñервера"
ER_UNEXPECTED_EOF
- cze "Neo-BÄekávaný konec souboru pÅ™i Ätení '%-.192s' (chybový kód: %d)"
- dan "Uventet afslutning på fil (eof) ved læsning af filen '%-.192s' (Fejlkode: %d)"
- nla "Onverwachte eof gevonden tijdens het lezen van file '%-.192s' (Errcode: %d)"
- eng "Unexpected EOF found when reading file '%-.192s' (errno: %d)"
- jps "'%-.192s' ファイルを読ã¿è¾¼ã¿ä¸­ã« EOF ãŒäºˆæœŸã›ã¬æ‰€ã§ç¾ã‚Œã¾ã—ãŸ. (errno: %d)",
- est "Ootamatu faililõpumärgend faili '%-.192s' lugemisel (veakood: %d)"
- fre "Fin de fichier inattendue en lisant '%-.192s' (Errcode: %d)"
- ger "Unerwartetes Ende beim Lesen der Datei '%-.192s' (Fehler: %d)"
- greek "Κατά τη διάÏκεια της ανάγνωσης, βÏέθηκε απÏοσδόκητα το τέλος του αÏχείου '%-.192s' (κωδικός λάθους: %d)"
- hun "Varatlan filevege-jel a '%-.192s'olvasasakor. (hibakod: %d)"
- ita "Fine del file inaspettata durante la lettura del file '%-.192s' (errno: %d)"
- jpn "'%-.192s' ファイルを読ã¿è¾¼ã¿ä¸­ã« EOF ãŒäºˆæœŸã›ã¬æ‰€ã§ç¾ã‚Œã¾ã—ãŸ. (errno: %d)"
- kor "'%-.192s' í™”ì¼ì„ ì½ëŠ” ë„중 ìž˜ëª»ëœ eofì„ ë°œê²¬ (ì—러번호: %d)"
- nor "Uventet slutt på fil (eof) ved lesing av filen '%-.192s' (Feilkode: %d)"
- norwegian-ny "Uventa slutt på fil (eof) ved lesing av fila '%-.192s' (Feilkode: %d)"
- pol "Nieoczekiwany 'eof' napotkany podczas czytania z pliku '%-.192s' (Kod błędu: %d)"
- por "Encontrado fim de arquivo inesperado ao ler arquivo '%-.192s' (erro no. %d)"
- rum "Sfirsit de fisier neasteptat in citirea fisierului '%-.192s' (errno: %d)"
- rus "Ðеожиданный конец файла '%-.192s' (ошибка: %d)"
- serbian "NeoÄekivani kraj pri Äitanju file-a '%-.192s' (errno: %d)"
- slo "NeoÄakávaný koniec súboru pri Äítaní '%-.192s' (chybový kód: %d)"
- spa "Inesperado fin de ficheroU mientras leiamos el archivo '%-.192s' (Error: %d)"
- swe "Oväntat filslut vid läsning från '%-.192s' (Felkod: %d)"
- ukr "Хибний кінець файлу '%-.192s' (помилка: %d)"
+ cze "Neo-BÄekávaný konec souboru pÅ™i Ätení '%-.192s' (chybový kód: %M)"
+ dan "Uventet afslutning på fil (eof) ved læsning af filen '%-.192s' (Fejlkode: %M)"
+ nla "Onverwachte eof gevonden tijdens het lezen van file '%-.192s' (Errcode: %M)"
+ eng "Unexpected EOF found when reading file '%-.192s' (errno: %M)"
+ jps "'%-.192s' ファイルを読ã¿è¾¼ã¿ä¸­ã« EOF ãŒäºˆæœŸã›ã¬æ‰€ã§ç¾ã‚Œã¾ã—ãŸ. (errno: %M)",
+ est "Ootamatu faililõpumärgend faili '%-.192s' lugemisel (veakood: %M)"
+ fre "Fin de fichier inattendue en lisant '%-.192s' (Errcode: %M)"
+ ger "Unerwartetes Ende beim Lesen der Datei '%-.192s' (Fehler: %M)"
+ greek "Κατά τη διάÏκεια της ανάγνωσης, βÏέθηκε απÏοσδόκητα το τέλος του αÏχείου '%-.192s' (κωδικός λάθους: %M)"
+ hun "Varatlan filevege-jel a '%-.192s'olvasasakor. (hibakod: %M)"
+ ita "Fine del file inaspettata durante la lettura del file '%-.192s' (errno: %M)"
+ jpn "'%-.192s' ファイルを読ã¿è¾¼ã¿ä¸­ã« EOF ãŒäºˆæœŸã›ã¬æ‰€ã§ç¾ã‚Œã¾ã—ãŸ. (errno: %M)"
+ kor "'%-.192s' í™”ì¼ì„ ì½ëŠ” ë„중 ìž˜ëª»ëœ eofì„ ë°œê²¬ (ì—러번호: %M)"
+ nor "Uventet slutt på fil (eof) ved lesing av filen '%-.192s' (Feilkode: %M)"
+ norwegian-ny "Uventa slutt på fil (eof) ved lesing av fila '%-.192s' (Feilkode: %M)"
+ pol "Nieoczekiwany 'eof' napotkany podczas czytania z pliku '%-.192s' (Kod błędu: %M)"
+ por "Encontrado fim de arquivo inesperado ao ler arquivo '%-.192s' (erro no. %M)"
+ rum "Sfirsit de fisier neasteptat in citirea fisierului '%-.192s' (errno: %M)"
+ rus "Ðеожиданный конец файла '%-.192s' (ошибка: %M)"
+ serbian "NeoÄekivani kraj pri Äitanju file-a '%-.192s' (errno: %M)"
+ slo "NeoÄakávaný koniec súboru pri Äítaní '%-.192s' (chybový kód: %M)"
+ spa "Inesperado fin de ficheroU mientras leiamos el archivo '%-.192s' (Error: %M)"
+ swe "Oväntat filslut vid läsning från '%-.192s' (Felkod: %M)"
+ ukr "Хибний кінець файлу '%-.192s' (помилка: %M)"
ER_CON_COUNT_ERROR 08004
cze "P-Bříliš mnoho spojení"
dan "For mange forbindelser (connections)"
@@ -2808,7 +2808,7 @@ ER_TOO_BIG_ROWSIZE 42000
cze "-BŘádek je příliÅ¡ velký. Maximální velikost řádku, nepoÄítaje položky blob, je %ld. Musíte zmÄ›nit nÄ›které položky na blob"
dan "For store poster. Max post størrelse, uden BLOB's, er %ld. Du må lave nogle felter til BLOB's"
nla "Rij-grootte is groter dan toegestaan. Maximale rij grootte, blobs niet meegeteld, is %ld. U dient sommige velden in blobs te veranderen."
- eng "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs"
+ eng "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs"
jps "row size ãŒå¤§ãã™ãŽã¾ã™. BLOB ã‚’å«ã¾ãªã„å ´åˆã® row size ã®æœ€å¤§ã¯ %ld ã§ã™. ã„ãã¤ã‹ã® field ã‚’ BLOB ã«å¤‰ãˆã¦ãã ã•ã„.",
est "Liiga pikk kirje. Kirje maksimumpikkus arvestamata BLOB-tüüpi välju on %ld. Muuda mõned väljad BLOB-tüüpi väljadeks"
fre "Ligne trop grande. Le taille maximale d'une ligne, sauf les BLOBs, est %ld. Changez le type de quelques colonnes en BLOB"
@@ -3150,28 +3150,28 @@ ER_UPDATE_INFO
swe "Rader: %ld Uppdaterade: %ld Varningar: %ld"
ukr "ЗапиÑів відповідає: %ld Змінено: %ld ЗаÑтережень: %ld"
ER_CANT_CREATE_THREAD
- cze "Nemohu vytvo-BÅ™it nový thread (errno %d). Pokud je jeÅ¡tÄ› nÄ›jaká volná paměť, podívejte se do manuálu na Äást o chybách specifických pro jednotlivé operaÄní systémy"
- dan "Kan ikke danne en ny tråd (fejl nr. %d). Hvis computeren ikke er løbet tør for hukommelse, kan du se i brugervejledningen for en mulig operativ-system - afhængig fejl"
- nla "Kan geen nieuwe thread aanmaken (Errcode: %d). Indien er geen tekort aan geheugen is kunt u de handleiding consulteren over een mogelijke OS afhankelijke fout"
- eng "Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug"
- jps "æ–°è¦ã«ã‚¹ãƒ¬ãƒƒãƒ‰ãŒä½œã‚Œã¾ã›ã‚“ã§ã—㟠(errno %d). ã‚‚ã—æœ€å¤§ä½¿ç”¨è¨±å¯ãƒ¡ãƒ¢ãƒªãƒ¼æ•°ã‚’è¶Šãˆã¦ã„ãªã„ã®ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¦ã„ã‚‹ãªã‚‰, マニュアルã®ä¸­ã‹ã‚‰ 'possible OS-dependent bug' ã¨ã„ã†æ–‡å­—を探ã—ã¦ãã¿ã¦ã ã•ã„.",
- est "Ei suuda luua uut lõime (veakood %d). Kui mälu ei ole otsas, on tõenäoliselt tegemist operatsioonisüsteemispetsiifilise veaga"
- fre "Impossible de créer une nouvelle tâche (errno %d). S'il reste de la mémoire libre, consultez le manual pour trouver un éventuel bug dépendant de l'OS"
- ger "Kann keinen neuen Thread erzeugen (Fehler: %d). Sollte noch Speicher verfügbar sein, bitte im Handbuch wegen möglicher Fehler im Betriebssystem nachschlagen"
- hun "Uj thread letrehozasa nem lehetseges (Hibakod: %d). Amenyiben van meg szabad memoria, olvassa el a kezikonyv operacios rendszerfuggo hibalehetosegekrol szolo reszet"
- ita "Impossibile creare un nuovo thread (errno %d). Se non ci sono problemi di memoria disponibile puoi consultare il manuale per controllare possibili problemi dipendenti dal SO"
- jpn "æ–°è¦ã«ã‚¹ãƒ¬ãƒƒãƒ‰ãŒä½œã‚Œã¾ã›ã‚“ã§ã—㟠(errno %d). ã‚‚ã—æœ€å¤§ä½¿ç”¨è¨±å¯ãƒ¡ãƒ¢ãƒªãƒ¼æ•°ã‚’è¶Šãˆã¦ã„ãªã„ã®ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¦ã„ã‚‹ãªã‚‰, マニュアルã®ä¸­ã‹ã‚‰ 'possible OS-dependent bug' ã¨ã„ã†æ–‡å­—を探ã—ã¦ãã¿ã¦ã ã•ã„."
- kor "새로운 쓰레드를 만들 수 없습니다.(ì—러번호 %d). 만약 여유메모리가 있다면 OS-dependent버그 ì˜ ë©”ë‰´ì–¼ ë¶€ë¶„ì„ ì°¾ì•„ë³´ì‹œì˜¤."
- nor "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug"
- norwegian-ny "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug"
- pol "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug"
- por "Não pode criar uma nova 'thread' (erro no. %d). Se você não estiver sem memória disponível, você pode consultar o manual sobre um possível 'bug' dependente do sistema operacional"
- rum "Nu pot crea un thread nou (Eroare %d). Daca mai aveti memorie disponibila in sistem, puteti consulta manualul - ar putea exista un potential bug in legatura cu sistemul de operare"
- rus "Ðевозможно Ñоздать новый поток (ошибка %d). ЕÑли Ñто не ÑитуациÑ, ÑвÑÐ·Ð°Ð½Ð½Ð°Ñ Ñ Ð½ÐµÑ…Ð²Ð°Ñ‚ÐºÐ¾Ð¹ памÑти, то вам Ñледует изучить документацию на предмет опиÑÐ°Ð½Ð¸Ñ Ð²Ð¾Ð·Ð¼Ð¾Ð¶Ð½Ð¾Ð¹ ошибки работы в конкретной ОС"
- serbian "Ne mogu da kreiram novi thread (errno %d). Ako imate joÅ¡ slobodne memorije, trebali biste da pogledate u priruÄniku da li je ovo specifiÄna greÅ¡ka vaÅ¡eg operativnog sistema"
- spa "No puedo crear un nuevo thread (errno %d). Si tu está con falta de memoria disponible, tu puedes consultar el Manual para posibles problemas con SO"
- swe "Kan inte skapa en ny tråd (errno %d)"
- ukr "Ðе можу Ñтворити нову гілку (помилка %d). Якщо ви не викориÑтали уÑÑŽ пам'Ñть, то прочитайте документацію до вашої ОС - можливо це помилка ОС"
+ cze "Nemohu vytvo-BÅ™it nový thread (errno %M). Pokud je jeÅ¡tÄ› nÄ›jaká volná paměť, podívejte se do manuálu na Äást o chybách specifických pro jednotlivé operaÄní systémy"
+ dan "Kan ikke danne en ny tråd (fejl nr. %M). Hvis computeren ikke er løbet tør for hukommelse, kan du se i brugervejledningen for en mulig operativ-system - afhængig fejl"
+ nla "Kan geen nieuwe thread aanmaken (Errcode: %M). Indien er geen tekort aan geheugen is kunt u de handleiding consulteren over een mogelijke OS afhankelijke fout"
+ eng "Can't create a new thread (errno %M); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug"
+ jps "æ–°è¦ã«ã‚¹ãƒ¬ãƒƒãƒ‰ãŒä½œã‚Œã¾ã›ã‚“ã§ã—㟠(errno %M). ã‚‚ã—æœ€å¤§ä½¿ç”¨è¨±å¯ãƒ¡ãƒ¢ãƒªãƒ¼æ•°ã‚’è¶Šãˆã¦ã„ãªã„ã®ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¦ã„ã‚‹ãªã‚‰, マニュアルã®ä¸­ã‹ã‚‰ 'possible OS-dependent bug' ã¨ã„ã†æ–‡å­—を探ã—ã¦ãã¿ã¦ã ã•ã„.",
+ est "Ei suuda luua uut lõime (veakood %M). Kui mälu ei ole otsas, on tõenäoliselt tegemist operatsioonisüsteemispetsiifilise veaga"
+ fre "Impossible de créer une nouvelle tâche (errno %M). S'il reste de la mémoire libre, consultez le manual pour trouver un éventuel bug dépendant de l'OS"
+ ger "Kann keinen neuen Thread erzeugen (Fehler: %M). Sollte noch Speicher verfügbar sein, bitte im Handbuch wegen möglicher Fehler im Betriebssystem nachschlagen"
+ hun "Uj thread letrehozasa nem lehetseges (Hibakod: %M). Amenyiben van meg szabad memoria, olvassa el a kezikonyv operacios rendszerfuggo hibalehetosegekrol szolo reszet"
+ ita "Impossibile creare un nuovo thread (errno %M). Se non ci sono problemi di memoria disponibile puoi consultare il manuale per controllare possibili problemi dipendenti dal SO"
+ jpn "æ–°è¦ã«ã‚¹ãƒ¬ãƒƒãƒ‰ãŒä½œã‚Œã¾ã›ã‚“ã§ã—㟠(errno %M). ã‚‚ã—æœ€å¤§ä½¿ç”¨è¨±å¯ãƒ¡ãƒ¢ãƒªãƒ¼æ•°ã‚’è¶Šãˆã¦ã„ãªã„ã®ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¦ã„ã‚‹ãªã‚‰, マニュアルã®ä¸­ã‹ã‚‰ 'possible OS-dependent bug' ã¨ã„ã†æ–‡å­—を探ã—ã¦ãã¿ã¦ã ã•ã„."
+ kor "새로운 쓰레드를 만들 수 없습니다.(ì—러번호 %M). 만약 여유메모리가 있다면 OS-dependent버그 ì˜ ë©”ë‰´ì–¼ ë¶€ë¶„ì„ ì°¾ì•„ë³´ì‹œì˜¤."
+ nor "Can't create a new thread (errno %M); if you are not out of available memory you can consult the manual for any possible OS dependent bug"
+ norwegian-ny "Can't create a new thread (errno %M); if you are not out of available memory you can consult the manual for any possible OS dependent bug"
+ pol "Can't create a new thread (errno %M); if you are not out of available memory you can consult the manual for any possible OS dependent bug"
+ por "Não pode criar uma nova 'thread' (erro no. %M). Se você não estiver sem memória disponível, você pode consultar o manual sobre um possível 'bug' dependente do sistema operacional"
+ rum "Nu pot crea un thread nou (Eroare %M). Daca mai aveti memorie disponibila in sistem, puteti consulta manualul - ar putea exista un potential bug in legatura cu sistemul de operare"
+ rus "Ðевозможно Ñоздать новый поток (ошибка %M). ЕÑли Ñто не ÑитуациÑ, ÑвÑÐ·Ð°Ð½Ð½Ð°Ñ Ñ Ð½ÐµÑ…Ð²Ð°Ñ‚ÐºÐ¾Ð¹ памÑти, то вам Ñледует изучить документацию на предмет опиÑÐ°Ð½Ð¸Ñ Ð²Ð¾Ð·Ð¼Ð¾Ð¶Ð½Ð¾Ð¹ ошибки работы в конкретной ОС"
+ serbian "Ne mogu da kreiram novi thread (errno %M). Ako imate joÅ¡ slobodne memorije, trebali biste da pogledate u priruÄniku da li je ovo specifiÄna greÅ¡ka vaÅ¡eg operativnog sistema"
+ spa "No puedo crear un nuevo thread (errno %M). Si tu está con falta de memoria disponible, tu puedes consultar el Manual para posibles problemas con SO"
+ swe "Kan inte skapa en ny tråd (errno %M)"
+ ukr "Ðе можу Ñтворити нову гілку (помилка %M). Якщо ви не викориÑтали уÑÑŽ пам'Ñть, то прочитайте документацію до вашої ОС - можливо це помилка ОС"
ER_WRONG_VALUE_COUNT_ON_ROW 21S01
cze "Po-BÄet sloupců neodpovídá poÄtu hodnot na řádku %lu"
dan "Kolonne antallet stemmer ikke overens med antallet af værdier i post %lu"
@@ -3288,45 +3288,45 @@ ER_NONEXISTING_GRANT 42000
swe "Det finns inget privilegium definierat för användare '%-.48s' på '%-.64s'"
ukr "Повноважень не визначено Ð´Ð»Ñ ÐºÐ¾Ñ€Ð¸Ñтувача '%-.48s' з хоÑту '%-.64s'"
ER_TABLEACCESS_DENIED_ERROR 42000
- cze "%-.16s p-Bříkaz nepřístupný pro uživatele: '%-.48s'@'%-.64s' pro tabulku '%-.192s'"
- dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.48s'@'%-.64s' for tabellen '%-.192s'"
- nla "%-.16s commando geweigerd voor gebruiker: '%-.48s'@'%-.64s' voor tabel '%-.192s'"
- eng "%-.16s command denied to user '%-.48s'@'%-.64s' for table '%-.192s'"
- jps "コマンド %-.16s 㯠ユーザー '%-.48s'@'%-.64s' ,テーブル '%-.192s' ã«å¯¾ã—ã¦è¨±å¯ã•れã¦ã„ã¾ã›ã‚“",
- est "%-.16s käsk ei ole lubatud kasutajale '%-.48s'@'%-.64s' tabelis '%-.192s'"
- fre "La commande '%-.16s' est interdite à l'utilisateur: '%-.48s'@'@%-.64s' sur la table '%-.192s'"
- ger "%-.16s Befehl nicht erlaubt für Benutzer '%-.48s'@'%-.64s' auf Tabelle '%-.192s'"
- hun "%-.16s parancs a '%-.48s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.192s' tablaban"
- ita "Comando %-.16s negato per l'utente: '%-.48s'@'%-.64s' sulla tabella '%-.192s'"
- jpn "コマンド %-.16s 㯠ユーザー '%-.48s'@'%-.64s' ,テーブル '%-.192s' ã«å¯¾ã—ã¦è¨±å¯ã•れã¦ã„ã¾ã›ã‚“"
- kor "'%-.16s' ëª…ë ¹ì€ ë‹¤ìŒ ì‚¬ìš©ìžì—게 ê±°ë¶€ë˜ì—ˆìŠµë‹ˆë‹¤. : '%-.48s'@'%-.64s' for í…Œì´ë¸” '%-.192s'"
- por "Comando '%-.16s' negado para o usuário '%-.48s'@'%-.64s' na tabela '%-.192s'"
- rum "Comanda %-.16s interzisa utilizatorului: '%-.48s'@'%-.64s' pentru tabela '%-.192s'"
- rus "Команда %-.16s запрещена пользователю '%-.48s'@'%-.64s' Ð´Ð»Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ‹ '%-.192s'"
- serbian "%-.16s komanda zabranjena za korisnika '%-.48s'@'%-.64s' za tabelu '%-.192s'"
- spa "%-.16s comando negado para usuario: '%-.48s'@'%-.64s' para tabla '%-.192s'"
- swe "%-.16s ej tillåtet för '%-.48s'@'%-.64s' för tabell '%-.192s'"
- ukr "%-.16s команда заборонена кориÑтувачу: '%-.48s'@'%-.64s' у таблиці '%-.192s'"
+ cze "%-.32s p-Bříkaz nepřístupný pro uživatele: '%-.48s'@'%-.64s' pro tabulku '%-.192s'"
+ dan "%-.32s-kommandoen er ikke tilladt for brugeren '%-.48s'@'%-.64s' for tabellen '%-.192s'"
+ nla "%-.32s commando geweigerd voor gebruiker: '%-.48s'@'%-.64s' voor tabel '%-.192s'"
+ eng "%-.32s command denied to user '%-.48s'@'%-.64s' for table '%-.192s'"
+ jps "コマンド %-.32s 㯠ユーザー '%-.48s'@'%-.64s' ,テーブル '%-.192s' ã«å¯¾ã—ã¦è¨±å¯ã•れã¦ã„ã¾ã›ã‚“",
+ est "%-.32s käsk ei ole lubatud kasutajale '%-.48s'@'%-.64s' tabelis '%-.192s'"
+ fre "La commande '%-.32s' est interdite à l'utilisateur: '%-.48s'@'@%-.64s' sur la table '%-.192s'"
+ ger "%-.32s Befehl nicht erlaubt für Benutzer '%-.48s'@'%-.64s' auf Tabelle '%-.192s'"
+ hun "%-.32s parancs a '%-.48s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.192s' tablaban"
+ ita "Comando %-.32s negato per l'utente: '%-.48s'@'%-.64s' sulla tabella '%-.192s'"
+ jpn "コマンド %-.32s 㯠ユーザー '%-.48s'@'%-.64s' ,テーブル '%-.192s' ã«å¯¾ã—ã¦è¨±å¯ã•れã¦ã„ã¾ã›ã‚“"
+ kor "'%-.32s' ëª…ë ¹ì€ ë‹¤ìŒ ì‚¬ìš©ìžì—게 ê±°ë¶€ë˜ì—ˆìŠµë‹ˆë‹¤. : '%-.48s'@'%-.64s' for í…Œì´ë¸” '%-.192s'"
+ por "Comando '%-.32s' negado para o usuário '%-.48s'@'%-.64s' na tabela '%-.192s'"
+ rum "Comanda %-.32s interzisa utilizatorului: '%-.48s'@'%-.64s' pentru tabela '%-.192s'"
+ rus "Команда %-.32s запрещена пользователю '%-.48s'@'%-.64s' Ð´Ð»Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ‹ '%-.192s'"
+ serbian "%-.32s komanda zabranjena za korisnika '%-.48s'@'%-.64s' za tabelu '%-.192s'"
+ spa "%-.32s comando negado para usuario: '%-.48s'@'%-.64s' para tabla '%-.192s'"
+ swe "%-.32s ej tillåtet för '%-.48s'@'%-.64s' för tabell '%-.192s'"
+ ukr "%-.32s команда заборонена кориÑтувачу: '%-.48s'@'%-.64s' у таблиці '%-.192s'"
ER_COLUMNACCESS_DENIED_ERROR 42000
- cze "%-.16s p-Bříkaz nepřístupný pro uživatele: '%-.48s'@'%-.64s' pro sloupec '%-.192s' v tabulce '%-.192s'"
- dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.48s'@'%-.64s' for kolonne '%-.192s' in tabellen '%-.192s'"
- nla "%-.16s commando geweigerd voor gebruiker: '%-.48s'@'%-.64s' voor kolom '%-.192s' in tabel '%-.192s'"
- eng "%-.16s command denied to user '%-.48s'@'%-.64s' for column '%-.192s' in table '%-.192s'"
- jps "コマンド %-.16s 㯠ユーザー '%-.48s'@'%-.64s'Â¥n カラム '%-.192s' テーブル '%-.192s' ã«å¯¾ã—ã¦è¨±å¯ã•れã¦ã„ã¾ã›ã‚“",
- est "%-.16s käsk ei ole lubatud kasutajale '%-.48s'@'%-.64s' tulbale '%-.192s' tabelis '%-.192s'"
- fre "La commande '%-.16s' est interdite à l'utilisateur: '%-.48s'@'@%-.64s' sur la colonne '%-.192s' de la table '%-.192s'"
- ger "%-.16s Befehl nicht erlaubt für Benutzer '%-.48s'@'%-.64s' und Feld '%-.192s' in Tabelle '%-.192s'"
- hun "%-.16s parancs a '%-.48s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.192s' mezo eseten a '%-.192s' tablaban"
- ita "Comando %-.16s negato per l'utente: '%-.48s'@'%-.64s' sulla colonna '%-.192s' della tabella '%-.192s'"
- jpn "コマンド %-.16s 㯠ユーザー '%-.48s'@'%-.64s'\n カラム '%-.192s' テーブル '%-.192s' ã«å¯¾ã—ã¦è¨±å¯ã•れã¦ã„ã¾ã›ã‚“"
- kor "'%-.16s' ëª…ë ¹ì€ ë‹¤ìŒ ì‚¬ìš©ìžì—게 ê±°ë¶€ë˜ì—ˆìŠµë‹ˆë‹¤. : '%-.48s'@'%-.64s' for 칼럼 '%-.192s' in í…Œì´ë¸” '%-.192s'"
- por "Comando '%-.16s' negado para o usuário '%-.48s'@'%-.64s' na coluna '%-.192s', na tabela '%-.192s'"
- rum "Comanda %-.16s interzisa utilizatorului: '%-.48s'@'%-.64s' pentru coloana '%-.192s' in tabela '%-.192s'"
- rus "Команда %-.16s запрещена пользователю '%-.48s'@'%-.64s' Ð´Ð»Ñ Ñтолбца '%-.192s' в таблице '%-.192s'"
- serbian "%-.16s komanda zabranjena za korisnika '%-.48s'@'%-.64s' za kolonu '%-.192s' iz tabele '%-.192s'"
- spa "%-.16s comando negado para usuario: '%-.48s'@'%-.64s' para columna '%-.192s' en la tabla '%-.192s'"
- swe "%-.16s ej tillåtet för '%-.48s'@'%-.64s' för kolumn '%-.192s' i tabell '%-.192s'"
- ukr "%-.16s команда заборонена кориÑтувачу: '%-.48s'@'%-.64s' Ð´Ð»Ñ ÑÑ‚Ð¾Ð²Ð±Ñ†Ñ '%-.192s' у таблиці '%-.192s'"
+ cze "%-.32s p-Bříkaz nepřístupný pro uživatele: '%-.48s'@'%-.64s' pro sloupec '%-.192s' v tabulce '%-.192s'"
+ dan "%-.32s-kommandoen er ikke tilladt for brugeren '%-.48s'@'%-.64s' for kolonne '%-.192s' in tabellen '%-.192s'"
+ nla "%-.32s commando geweigerd voor gebruiker: '%-.48s'@'%-.64s' voor kolom '%-.192s' in tabel '%-.192s'"
+ eng "%-.32s command denied to user '%-.48s'@'%-.64s' for column '%-.192s' in table '%-.192s'"
+ jps "コマンド %-.32s 㯠ユーザー '%-.48s'@'%-.64s'Â¥n カラム '%-.192s' テーブル '%-.192s' ã«å¯¾ã—ã¦è¨±å¯ã•れã¦ã„ã¾ã›ã‚“",
+ est "%-.32s käsk ei ole lubatud kasutajale '%-.48s'@'%-.64s' tulbale '%-.192s' tabelis '%-.192s'"
+ fre "La commande '%-.32s' est interdite à l'utilisateur: '%-.48s'@'@%-.64s' sur la colonne '%-.192s' de la table '%-.192s'"
+ ger "%-.32s Befehl nicht erlaubt für Benutzer '%-.48s'@'%-.64s' und Feld '%-.192s' in Tabelle '%-.192s'"
+ hun "%-.32s parancs a '%-.48s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.192s' mezo eseten a '%-.192s' tablaban"
+ ita "Comando %-.32s negato per l'utente: '%-.48s'@'%-.64s' sulla colonna '%-.192s' della tabella '%-.192s'"
+ jpn "コマンド %-.32s 㯠ユーザー '%-.48s'@'%-.64s'\n カラム '%-.192s' テーブル '%-.192s' ã«å¯¾ã—ã¦è¨±å¯ã•れã¦ã„ã¾ã›ã‚“"
+ kor "'%-.32s' ëª…ë ¹ì€ ë‹¤ìŒ ì‚¬ìš©ìžì—게 ê±°ë¶€ë˜ì—ˆìŠµë‹ˆë‹¤. : '%-.48s'@'%-.64s' for 칼럼 '%-.192s' in í…Œì´ë¸” '%-.192s'"
+ por "Comando '%-.32s' negado para o usuário '%-.48s'@'%-.64s' na coluna '%-.192s', na tabela '%-.192s'"
+ rum "Comanda %-.32s interzisa utilizatorului: '%-.48s'@'%-.64s' pentru coloana '%-.192s' in tabela '%-.192s'"
+ rus "Команда %-.32s запрещена пользователю '%-.48s'@'%-.64s' Ð´Ð»Ñ Ñтолбца '%-.192s' в таблице '%-.192s'"
+ serbian "%-.32s komanda zabranjena za korisnika '%-.48s'@'%-.64s' za kolonu '%-.192s' iz tabele '%-.192s'"
+ spa "%-.32s comando negado para usuario: '%-.48s'@'%-.64s' para columna '%-.192s' en la tabla '%-.192s'"
+ swe "%-.32s ej tillåtet för '%-.48s'@'%-.64s' för kolumn '%-.192s' i tabell '%-.192s'"
+ ukr "%-.32s команда заборонена кориÑтувачу: '%-.48s'@'%-.64s' Ð´Ð»Ñ ÑÑ‚Ð¾Ð²Ð±Ñ†Ñ '%-.192s' у таблиці '%-.192s'"
ER_ILLEGAL_GRANT_FOR_TABLE 42000
cze "Neplatn-Bý příkaz GRANT/REVOKE. Prosím, pÅ™eÄtÄ›te si v manuálu, jaká privilegia je možné použít."
dan "Forkert GRANT/REVOKE kommando. Se i brugervejledningen hvilke privilegier der kan specificeres."
@@ -4010,69 +4010,69 @@ ER_CANT_DO_THIS_DURING_AN_TRANSACTION 25000
swe "Du får inte utföra detta kommando i en transaktion"
ukr "Вам не дозволено виконувати цю команду в транзакції"
ER_ERROR_DURING_COMMIT
- cze "Chyba %d p-Bři COMMIT"
- dan "Modtog fejl %d mens kommandoen COMMIT blev udført"
- nla "Kreeg fout %d tijdens COMMIT"
- eng "Got error %d during COMMIT"
- est "Viga %d käsu COMMIT täitmisel"
- fre "Erreur %d lors du COMMIT"
- ger "Fehler %d beim COMMIT"
- hun "%d hiba a COMMIT vegrehajtasa soran"
- ita "Rilevato l'errore %d durante il COMMIT"
- por "Obteve erro %d durante COMMIT"
- rus "Получена ошибка %d в процеÑÑе COMMIT"
- serbian "Greška %d za vreme izvršavanja komande 'COMMIT'"
- spa "Obtenido error %d durante COMMIT"
- swe "Fick fel %d vid COMMIT"
- ukr "Отримано помилку %d під Ñ‡Ð°Ñ COMMIT"
+ cze "Chyba %M p-Bři COMMIT"
+ dan "Modtog fejl %M mens kommandoen COMMIT blev udført"
+ nla "Kreeg fout %M tijdens COMMIT"
+ eng "Got error %M during COMMIT"
+ est "Viga %M käsu COMMIT täitmisel"
+ fre "Erreur %M lors du COMMIT"
+ ger "Fehler %M beim COMMIT"
+ hun "%M hiba a COMMIT vegrehajtasa soran"
+ ita "Rilevato l'errore %M durante il COMMIT"
+ por "Obteve erro %M durante COMMIT"
+ rus "Получена ошибка %M в процеÑÑе COMMIT"
+ serbian "Greška %M za vreme izvršavanja komande 'COMMIT'"
+ spa "Obtenido error %M durante COMMIT"
+ swe "Fick fel %M vid COMMIT"
+ ukr "Отримано помилку %M під Ñ‡Ð°Ñ COMMIT"
ER_ERROR_DURING_ROLLBACK
- cze "Chyba %d p-Bři ROLLBACK"
- dan "Modtog fejl %d mens kommandoen ROLLBACK blev udført"
- nla "Kreeg fout %d tijdens ROLLBACK"
- eng "Got error %d during ROLLBACK"
- est "Viga %d käsu ROLLBACK täitmisel"
- fre "Erreur %d lors du ROLLBACK"
- ger "Fehler %d beim ROLLBACK"
- hun "%d hiba a ROLLBACK vegrehajtasa soran"
- ita "Rilevato l'errore %d durante il ROLLBACK"
- por "Obteve erro %d durante ROLLBACK"
- rus "Получена ошибка %d в процеÑÑе ROLLBACK"
- serbian "Greška %d za vreme izvršavanja komande 'ROLLBACK'"
- spa "Obtenido error %d durante ROLLBACK"
- swe "Fick fel %d vid ROLLBACK"
- ukr "Отримано помилку %d під Ñ‡Ð°Ñ ROLLBACK"
+ cze "Chyba %M p-Bři ROLLBACK"
+ dan "Modtog fejl %M mens kommandoen ROLLBACK blev udført"
+ nla "Kreeg fout %M tijdens ROLLBACK"
+ eng "Got error %M during ROLLBACK"
+ est "Viga %M käsu ROLLBACK täitmisel"
+ fre "Erreur %M lors du ROLLBACK"
+ ger "Fehler %M beim ROLLBACK"
+ hun "%M hiba a ROLLBACK vegrehajtasa soran"
+ ita "Rilevato l'errore %M durante il ROLLBACK"
+ por "Obteve erro %M durante ROLLBACK"
+ rus "Получена ошибка %M в процеÑÑе ROLLBACK"
+ serbian "Greška %M za vreme izvršavanja komande 'ROLLBACK'"
+ spa "Obtenido error %M durante ROLLBACK"
+ swe "Fick fel %M vid ROLLBACK"
+ ukr "Отримано помилку %M під Ñ‡Ð°Ñ ROLLBACK"
ER_ERROR_DURING_FLUSH_LOGS
- cze "Chyba %d p-Bři FLUSH_LOGS"
- dan "Modtog fejl %d mens kommandoen FLUSH_LOGS blev udført"
- nla "Kreeg fout %d tijdens FLUSH_LOGS"
- eng "Got error %d during FLUSH_LOGS"
- est "Viga %d käsu FLUSH_LOGS täitmisel"
- fre "Erreur %d lors du FLUSH_LOGS"
- ger "Fehler %d bei FLUSH_LOGS"
- hun "%d hiba a FLUSH_LOGS vegrehajtasa soran"
- ita "Rilevato l'errore %d durante il FLUSH_LOGS"
- por "Obteve erro %d durante FLUSH_LOGS"
- rus "Получена ошибка %d в процеÑÑе FLUSH_LOGS"
- serbian "Greška %d za vreme izvršavanja komande 'FLUSH_LOGS'"
- spa "Obtenido error %d durante FLUSH_LOGS"
- swe "Fick fel %d vid FLUSH_LOGS"
- ukr "Отримано помилку %d під Ñ‡Ð°Ñ FLUSH_LOGS"
+ cze "Chyba %M p-Bři FLUSH_LOGS"
+ dan "Modtog fejl %M mens kommandoen FLUSH_LOGS blev udført"
+ nla "Kreeg fout %M tijdens FLUSH_LOGS"
+ eng "Got error %M during FLUSH_LOGS"
+ est "Viga %M käsu FLUSH_LOGS täitmisel"
+ fre "Erreur %M lors du FLUSH_LOGS"
+ ger "Fehler %M bei FLUSH_LOGS"
+ hun "%M hiba a FLUSH_LOGS vegrehajtasa soran"
+ ita "Rilevato l'errore %M durante il FLUSH_LOGS"
+ por "Obteve erro %M durante FLUSH_LOGS"
+ rus "Получена ошибка %M в процеÑÑе FLUSH_LOGS"
+ serbian "Greška %M za vreme izvršavanja komande 'FLUSH_LOGS'"
+ spa "Obtenido error %M durante FLUSH_LOGS"
+ swe "Fick fel %M vid FLUSH_LOGS"
+ ukr "Отримано помилку %M під Ñ‡Ð°Ñ FLUSH_LOGS"
ER_ERROR_DURING_CHECKPOINT
- cze "Chyba %d p-Bři CHECKPOINT"
- dan "Modtog fejl %d mens kommandoen CHECKPOINT blev udført"
- nla "Kreeg fout %d tijdens CHECKPOINT"
- eng "Got error %d during CHECKPOINT"
- est "Viga %d käsu CHECKPOINT täitmisel"
- fre "Erreur %d lors du CHECKPOINT"
- ger "Fehler %d bei CHECKPOINT"
- hun "%d hiba a CHECKPOINT vegrehajtasa soran"
- ita "Rilevato l'errore %d durante il CHECKPOINT"
- por "Obteve erro %d durante CHECKPOINT"
- rus "Получена ошибка %d в процеÑÑе CHECKPOINT"
- serbian "Greška %d za vreme izvršavanja komande 'CHECKPOINT'"
- spa "Obtenido error %d durante CHECKPOINT"
- swe "Fick fel %d vid CHECKPOINT"
- ukr "Отримано помилку %d під Ñ‡Ð°Ñ CHECKPOINT"
+ cze "Chyba %M p-Bři CHECKPOINT"
+ dan "Modtog fejl %M mens kommandoen CHECKPOINT blev udført"
+ nla "Kreeg fout %M tijdens CHECKPOINT"
+ eng "Got error %M during CHECKPOINT"
+ est "Viga %M käsu CHECKPOINT täitmisel"
+ fre "Erreur %M lors du CHECKPOINT"
+ ger "Fehler %M bei CHECKPOINT"
+ hun "%M hiba a CHECKPOINT vegrehajtasa soran"
+ ita "Rilevato l'errore %M durante il CHECKPOINT"
+ por "Obteve erro %M durante CHECKPOINT"
+ rus "Получена ошибка %M в процессе CHECKPOINT"
+ serbian "Greška %M za vreme izvršavanja komande 'CHECKPOINT'"
+ spa "Obtenido error %M durante CHECKPOINT"
+ swe "Fick fel %M vid CHECKPOINT"
+ ukr "Отримано помилку %M під час CHECKPOINT"
ER_NEW_ABORTING_CONNECTION 08S01
cze "Spojen-Bí %ld do databáze: '%-.192s' uživatel: '%-.48s' stroj: '%-.64s' (%-.64s) bylo přerušeno"
dan "Afbrød forbindelsen %ld til databasen '%-.192s' bruger: '%-.48s' vært: '%-.64s' (%-.64s)"
@@ -4271,18 +4271,18 @@ ER_TRANS_CACHE_FULL
swe "Transaktionen krävde mera än 'max_binlog_cache_size' minne. Öka denna mysqld-variabel och försök på nytt"
ukr "Транзакція з багатьма виразами вимагає більше ніж 'max_binlog_cache_size' байтів для зберігання. Збільште цю змінну mysqld та спробуйте знову"
ER_SLAVE_MUST_STOP
- dan "Denne handling kunne ikke udføres med kørende slave, brug først kommandoen STOP SLAVE"
- nla "Deze operatie kan niet worden uitgevoerd met een actieve slave, doe eerst STOP SLAVE"
- eng "This operation cannot be performed with a running slave; run STOP SLAVE first"
- fre "Cette opération ne peut être réalisée avec un esclave actif, faites STOP SLAVE d'abord"
- ger "Diese Operation kann bei einem aktiven Slave nicht durchgeführt werden. Bitte zuerst STOP SLAVE ausführen"
- ita "Questa operazione non puo' essere eseguita con un database 'slave' che gira, lanciare prima STOP SLAVE"
- por "Esta operação não pode ser realizada com um 'slave' em execução. Execute STOP SLAVE primeiro"
- rus "Эту операцию невозможно выполнить при работающем потоке подчиненного сервера. Сначала выполните STOP SLAVE"
- serbian "Ova operacija ne može biti izvršena dok je aktivan podređeni server. Zadajte prvo komandu 'STOP SLAVE' da zaustavite podređeni server."
- spa "Esta operación no puede ser hecha con el esclavo funcionando, primero use STOP SLAVE"
- swe "Denna operation kan inte göras under replikering; Gör STOP SLAVE först"
- ukr "Операція не може бути виконана з запущеним підлеглим, спочатку виконайте STOP SLAVE"
+ dan "Denne handling kunne ikke udføres med kørende slave '%2$*1$s', brug først kommandoen STOP SLAVE '%2$*1$s'"
+ nla "Deze operatie kan niet worden uitgevoerd met een actieve slave '%2$*1$s', doe eerst STOP SLAVE '%2$*1$s'"
+ eng "This operation cannot be performed as you have a running slave '%2$*1$s'; run STOP SLAVE '%2$*1$s' first"
+ fre "Cette opération ne peut être réalisée avec un esclave '%2$*1$s' actif, faites STOP SLAVE '%2$*1$s' d'abord"
+ ger "Diese Operation kann bei einem aktiven Slave '%2$*1$s' nicht durchgeführt werden. Bitte zuerst STOP SLAVE '%2$*1$s' ausführen"
+ ita "Questa operazione non puo' essere eseguita con un database 'slave' '%2$*1$s' che gira, lanciare prima STOP SLAVE '%2$*1$s'"
+ por "Esta operação não pode ser realizada com um 'slave' '%2$*1$s' em execução. Execute STOP SLAVE '%2$*1$s' primeiro"
+ rus "Эту операцию невозможно выполнить при работающем потоке подчиненного сервера %2$*1$s. Сначала выполните STOP SLAVE '%2$*1$s'"
+ serbian "Ova operacija ne može biti izvršena dok je aktivan podređeni '%2$*1$s' server. Zadajte prvo komandu 'STOP SLAVE '%2$*1$s'' da zaustavite podređeni server."
+ spa "Esta operación no puede ser hecha con el esclavo '%2$*1$s' funcionando, primero use STOP SLAVE '%2$*1$s'"
+ swe "Denna operation kan inte göras under replikering; Du har en aktiv förbindelse till '%2$*1$s'. Gör STOP SLAVE '%2$*1$s' först"
+ ukr "Операція не може бути виконана з запущеним підлеглим '%2$*1$s', спочатку виконайте STOP SLAVE '%2$*1$s'"
ER_SLAVE_NOT_RUNNING
dan "Denne handling kræver en kørende slave. Konfigurer en slave og brug kommandoen START SLAVE"
nla "Deze operatie vereist een actieve slave, configureer slave en doe dan START SLAVE"
@@ -4310,11 +4310,11 @@ ER_BAD_SLAVE
swe "Servern är inte konfigurerade som en replikationsslav. Ändra konfigurationsfilen eller gör CHANGE MASTER TO"
ukr "Сервер не зконфігуровано як підлеглий, виправте це у файлі конфігурації або з CHANGE MASTER TO"
ER_MASTER_INFO
- eng "Could not initialize master info structure; more error messages can be found in the MariaDB error log"
- fre "Impossible d'initialiser les structures d'information de maître, vous trouverez des messages d'erreur supplémentaires dans le journal des erreurs de MariaDB"
- ger "Konnte Master-Info-Struktur nicht initialisieren. Weitere Fehlermeldungen können im MariaDB-Error-Log eingesehen werden"
- serbian "Nisam mogao da inicijalizujem informacionu strukturu glavnog servera, proverite da li imam privilegije potrebne za pristup file-u 'master.info'"
- swe "Kunde inte initialisera replikationsstrukturerna. See MariaDB fel fil för mera information"
+ eng "Could not initialize master info structure for '%.*s'; more error messages can be found in the MariaDB error log"
+ fre "Impossible d'initialiser les structures d'information de maître '%.*s', vous trouverez des messages d'erreur supplémentaires dans le journal des erreurs de MariaDB"
+ ger "Konnte Master-Info-Struktur '%.*s' nicht initialisieren. Weitere Fehlermeldungen können im MariaDB-Error-Log eingesehen werden"
+ serbian "Nisam mogao da inicijalizujem informacionu strukturu glavnog servera, proverite da li imam privilegije potrebne za pristup file-u 'master.info' '%.*s'"
+ swe "Kunde inte initialisera replikationsstrukturerna för '%.*s'. See MariaDB fel fil för mera information"
ER_SLAVE_THREAD
dan "Kunne ikke danne en slave-tråd; check systemressourcerne"
nla "Kon slave thread niet aanmaken, controleer systeem resources"
@@ -4345,13 +4345,13 @@ ER_TOO_MANY_USER_CONNECTIONS 42000
ER_SET_CONSTANTS_ONLY
dan "Du må kun bruge konstantudtryk med SET"
nla "U mag alleen constante expressies gebruiken bij SET"
- eng "You may only use constant expressions with SET"
+ eng "You may only use constant expressions in this statement"
est "Ainult konstantsed suurused on lubatud SET klauslis"
fre "Seules les expressions constantes sont autorisées avec SET"
- ger "Bei SET dürfen nur konstante Ausdrücke verwendet werden"
+ ger "Bei diesem Befehl dürfen nur konstante Ausdrücke verwendet werden"
ita "Si possono usare solo espressioni costanti con SET"
por "Você pode usar apenas expressões constantes com SET"
- rus "Вы можете использовать в SET только константные выражения"
+ rus "С этой командой вы можете использовать только константные выражения"
serbian "Možete upotrebiti samo konstantan iskaz sa komandom 'SET'"
spa "Tu solo debes usar expresiones constantes con SET"
swe "Man kan endast använda konstantuttryck med SET"
@@ -5330,8 +5330,8 @@ ER_VIEW_CHECK_FAILED
rus "проверка CHECK OPTION для VIEW '%-.192s.%-.192s' провалилась"
ukr "Перевірка CHECK OPTION для VIEW '%-.192s.%-.192s' не пройшла"
ER_PROCACCESS_DENIED_ERROR 42000
- eng "%-.16s command denied to user '%-.48s'@'%-.64s' for routine '%-.192s'"
- ger "Befehl %-.16s nicht zulässig für Benutzer '%-.48s'@'%-.64s' in Routine '%-.192s'"
+ eng "%-.32s command denied to user '%-.48s'@'%-.64s' for routine '%-.192s'"
+ ger "Befehl %-.32s nicht zulässig für Benutzer '%-.48s'@'%-.64s' in Routine '%-.192s'"
ER_RELAY_LOG_FAIL
eng "Failed purging old relay logs: %s"
ger "Bereinigen alter Relais-Logs fehlgeschlagen: %s"
@@ -5384,8 +5384,8 @@ ER_LOGGING_PROHIBIT_CHANGING_OF
eng "Binary logging and replication forbid changing the global server %s"
ger "Binärlogs und Replikation verhindern Wechsel des globalen Servers %s"
ER_NO_FILE_MAPPING
- eng "Can't map file: %-.200s, errno: %d"
- ger "Kann Datei nicht abbilden: %-.200s, Fehler: %d"
+ eng "Can't map file: %-.200s, errno: %M"
+ ger "Kann Datei nicht abbilden: %-.200s, Fehler: %M"
ER_WRONG_MAGIC
eng "Wrong magic in %-.64s"
ger "Falsche magische Zahlen in %-.64s"
@@ -5548,7 +5548,7 @@ ER_WARN_CANT_DROP_DEFAULT_KEYCACHE
ger "Der vorgabemäßige Schlüssel-Cache kann nicht gelöscht werden"
ER_TOO_BIG_DISPLAYWIDTH 42000 S1009
eng "Display width out of range for '%-.192s' (max = %lu)"
- ger "Anzeigebreite außerhalb des zulässigen Bereichs für '%-.192s' (Maximum: %lu)"
+ ger "Anzeigebreite außerhalb des zulässigen Bereichs für '%-.192s' (Maximum = %lu)"
ER_XAER_DUPID XAE08
eng "XAER_DUPID: The XID already exists"
ger "XAER_DUPID: Die XID existiert bereits"
@@ -5886,8 +5886,8 @@ ER_EVENT_ALREADY_EXISTS
eng "Event '%-.192s' already exists"
ger "Event '%-.192s' existiert bereits"
ER_EVENT_STORE_FAILED
- eng "Failed to store event %s. Error code %d from storage engine."
- ger "Speichern von Event %s fehlgeschlagen. Fehlercode der Speicher-Engine: %d"
+ eng "Failed to store event %s. Error code %M from storage engine."
+ ger "Speichern von Event %s fehlgeschlagen. Fehlercode der Speicher-Engine: %M"
ER_EVENT_DOES_NOT_EXIST
eng "Unknown event '%-.192s'"
ger "Unbekanntes Event '%-.192s'"
@@ -5991,8 +5991,8 @@ ER_EVENT_MODIFY_QUEUE_ERROR
eng "Internal scheduler error %d"
ger "Interner Scheduler-Fehler %d"
ER_EVENT_SET_VAR_ERROR
- eng "Error during starting/stopping of the scheduler. Error code %u"
- ger "Fehler während des Startens oder Anhalten des Schedulers. Fehlercode %u"
+ eng "Error during starting/stopping of the scheduler. Error code %M"
+ ger "Fehler während des Startens oder Anhalten des Schedulers. Fehlercode %M"
ER_PARTITION_MERGE_ERROR
eng "Engine cannot be used in partitioned tables"
ger "Engine kann in partitionierten Tabellen nicht verwendet werden"
@@ -6156,8 +6156,8 @@ ER_DELAYED_NOT_SUPPORTED
eng "DELAYED option not supported for table '%-.192s'"
ger "Die DELAYED-Option wird für Tabelle '%-.192s' nicht unterstützt"
WARN_NO_MASTER_INFO
- eng "The master info structure does not exist"
- ger "Die Master-Info-Struktur existiert nicht"
+ eng "There is no master connection '%.*s'"
+ ger "Die Master-Info-Struktur '%.*s' existiert nicht"
WARN_OPTION_IGNORED
eng "<%-.64s> option ignored"
ger "Option <%-.64s> ignoriert"
@@ -6588,3 +6588,15 @@ ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT
ER_NO_SUCH_TABLE_IN_ENGINE 42S02
eng "Table '%-.192s.%-.192s' doesn't exist in engine"
swe "Det finns ingen tabell som heter '%-.192s.%-.192s' i handlern"
+ER_TARGET_NOT_EXPLAINABLE
+ eng "Target is not running an EXPLAINable command"
+ER_CONNECTION_ALREADY_EXISTS
+ eng "Connection '%.*s' conflicts with existing connection '%.*s'"
+ER_MASTER_LOG_PREFIX
+ eng "Master '%.*s': "
+ER_CANT_START_STOP_SLAVE
+ eng "Can't %s SLAVE '%.*s'"
+ER_SLAVE_STARTED
+ eng "SLAVE '%.*s' started"
+ER_SLAVE_STOPPED
+ eng "SLAVE '%.*s' stopped" \ No newline at end of file
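Two format constructs above deserve a note. %M is MariaDB's extended
my_vsnprintf() specifier: unlike plain %d, it renders the error number
together with its error-message text. The '%2$*1$s' form in
ER_SLAVE_MUST_STOP is POSIX positional notation: print argument 2 as a
string, taking the field width from argument 1, which lets the server
pass a connection name as a (length, pointer) pair. A minimal
stand-alone C++ sketch of the positional form, using the libc printf()
(which accepts the same syntax on POSIX systems; an illustration, not
the server's formatter):

    #include <cstdio>

    int main()
    {
      // Argument 2 is the string, argument 1 supplies its field
      // width -- the same "%2$*1$s" shape the new error messages use.
      int len= 7;                      // length of the name
      const char *name= "master1";
      printf("STOP SLAVE '%2$*1$s'\n", len, name);
      return 0;
    }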
diff --git a/sql/slave.cc b/sql/slave.cc
index 7b7fd12e267..da10169d97b 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -71,8 +71,10 @@ char slave_skip_error_names[SHOW_VAR_FUNC_BUFF_SIZE];
char* slave_load_tmpdir = 0;
Master_info *active_mi= 0;
+Master_info_index *master_info_index;
my_bool replicate_same_server_id;
ulonglong relay_log_space_limit = 0;
+LEX_STRING default_master_connection_name= { (char*) "", 0 };
/*
When slave thread exits, we need to remember the temporary tables so we
@@ -144,7 +146,8 @@ static int process_io_create_file(Master_info* mi, Create_file_log_event* cev);
static bool wait_for_relay_log_space(Relay_log_info* rli);
static inline bool io_slave_killed(THD* thd,Master_info* mi);
static inline bool sql_slave_killed(THD* thd,Relay_log_info* rli);
-static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type);
+static int init_slave_thread(THD* thd, Master_info *mi,
+ SLAVE_THD_TYPE thd_type);
static void print_slave_skip_errors(void);
static int safe_connect(THD* thd, MYSQL* mysql, Master_info* mi);
static int safe_reconnect(THD* thd, MYSQL* mysql, Master_info* mi,
@@ -159,6 +162,8 @@ static int terminate_slave_thread(THD *thd,
volatile uint *slave_running,
bool skip_lock);
static bool check_io_slave_killed(THD *thd, Master_info *mi, const char *info);
+static bool send_show_master_info_header(THD *thd, bool full);
+static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full);
/*
Find out which replications threads are running
@@ -263,15 +268,33 @@ int init_slave()
So it's safer to take the lock.
*/
mysql_mutex_lock(&LOCK_active_mi);
- /*
- TODO: re-write this to interate through the list of files
- for multi-master
- */
- active_mi= new Master_info(relay_log_recovery);
if (pthread_key_create(&RPL_MASTER_INFO, NULL))
goto err;
+ master_info_index= new Master_info_index;
+ if (!master_info_index || master_info_index->init_all_master_info())
+ {
+ sql_print_error("Failed to initialize multi master structures");
+ mysql_mutex_unlock(&LOCK_active_mi);
+ DBUG_RETURN(1);
+ }
+ if (!(active_mi= new Master_info(&default_master_connection_name,
+ relay_log_recovery)) ||
+ active_mi->error())
+ {
+ delete active_mi;
+ active_mi= 0;
+ goto err;
+ }
+
+ if (master_info_index->add_master_info(active_mi, FALSE))
+ {
+ delete active_mi;
+ active_mi= 0;
+ goto err;
+ }
+
/*
If --slave-skip-errors=... was not used, the string value for the
system variable has not been set up yet. Do it now.
@@ -286,18 +309,11 @@ int init_slave()
If master_host is specified, create the master_info file if it doesn't
exists.
*/
- if (!active_mi)
- {
- sql_print_error("Failed to allocate memory for the master info structure");
- error= 1;
- goto err;
- }
if (init_master_info(active_mi,master_info_file,relay_log_info_file,
1, (SLAVE_IO | SLAVE_SQL)))
{
sql_print_error("Failed to initialize the master info structure");
- error= 1;
goto err;
}
@@ -313,14 +329,18 @@ int init_slave()
SLAVE_IO | SLAVE_SQL))
{
sql_print_error("Failed to create slave threads");
- error= 1;
goto err;
}
}
-err:
+end:
mysql_mutex_unlock(&LOCK_active_mi);
DBUG_RETURN(error);
+
+err:
+ sql_print_error("Failed to allocate memory for the Master Info structure");
+ error= 1;
+ goto end;
}
/*
@@ -820,42 +840,19 @@ void end_slave()
running presently. If a START SLAVE was in progress, the mutex lock below
will make us wait until slave threads have started, and START SLAVE
returns, then we terminate them here.
+
+ We can also be called by cleanup(), which only happens if some
+ startup parameter to the server was wrong.
*/
mysql_mutex_lock(&LOCK_active_mi);
- if (active_mi)
- {
- /*
- TODO: replace the line below with
- list_walk(&master_list, (list_walk_action)end_slave_on_walk,0);
- once multi-master code is ready.
- */
- terminate_slave_threads(active_mi,SLAVE_FORCE_ALL);
- }
+ /* This will call terminate_slave_threads() on all connections */
+ delete master_info_index;
+ master_info_index= 0;
+ active_mi= 0;
mysql_mutex_unlock(&LOCK_active_mi);
DBUG_VOID_RETURN;
}
-/**
- Free all resources used by slave threads at time of executing shutdown.
- The routine must be called after all possible users of @c active_mi
- have left.
-
- SYNOPSIS
- close_active_mi()
-
-*/
-void close_active_mi()
-{
- mysql_mutex_lock(&LOCK_active_mi);
- if (active_mi)
- {
- end_master_info(active_mi);
- delete active_mi;
- active_mi= 0;
- }
- mysql_mutex_unlock(&LOCK_active_mi);
-}
-
static bool io_slave_killed(THD* thd, Master_info* mi)
{
DBUG_ENTER("io_slave_killed");
@@ -1211,11 +1208,12 @@ bool is_network_error(uint errorno)
static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
{
- char err_buff[MAX_SLAVE_ERRMSG];
+ char err_buff[MAX_SLAVE_ERRMSG], err_buff2[MAX_SLAVE_ERRMSG];
const char* errmsg= 0;
int err_code= 0;
MYSQL_RES *master_res= 0;
MYSQL_ROW master_row;
+ uint version= mysql_get_server_version(mysql) / 10000;
DBUG_ENTER("get_master_version_and_clock");
/*
@@ -1227,29 +1225,34 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
if (!my_isdigit(&my_charset_bin,*mysql->server_version))
{
- errmsg = "Master reported unrecognized MySQL version";
+ errmsg= err_buff2;
+ snprintf(err_buff2, sizeof(err_buff2),
+ "Master reported unrecognized MySQL version: %s",
+ mysql->server_version);
err_code= ER_SLAVE_FATAL_ERROR;
- sprintf(err_buff, ER(err_code), errmsg);
+ sprintf(err_buff, ER(err_code), err_buff2);
}
else
{
/*
Note the following switch will bug when we have MySQL branch 30 ;)
*/
- switch (*mysql->server_version)
- {
- case '0':
- case '1':
- case '2':
- errmsg = "Master reported unrecognized MySQL version";
+ switch (version) {
+ case 0:
+ case 1:
+ case 2:
+ errmsg= err_buff2;
+ snprintf(err_buff2, sizeof(err_buff2),
+ "Master reported unrecognized MySQL version: %s",
+ mysql->server_version);
err_code= ER_SLAVE_FATAL_ERROR;
- sprintf(err_buff, ER(err_code), errmsg);
+ sprintf(err_buff, ER(err_code), err_buff2);
break;
- case '3':
+ case 3:
mi->rli.relay_log.description_event_for_queue= new
Format_description_log_event(1, mysql->server_version);
break;
- case '4':
+ case 4:
mi->rli.relay_log.description_event_for_queue= new
Format_description_log_event(3, mysql->server_version);
break;
@@ -1466,10 +1469,10 @@ maybe it is a *VERY OLD MASTER*.");
*/
/* redundant with rest of code but safer against later additions */
- if (*mysql->server_version == '3')
+ if (version == 3)
goto err;
- if (*mysql->server_version == '4')
+ if (version == 4)
{
master_res= NULL;
if (!mysql_real_query(mysql,
@@ -1532,7 +1535,7 @@ inconsistency if replicated data deals with collation.");
This check is only necessary for 4.x masters (and < 5.0.4 masters but
those were alpha).
*/
- if (*mysql->server_version == '4')
+ if (version == 4)
{
master_res= NULL;
if (!mysql_real_query(mysql, STRING_WITH_LEN("SELECT @@GLOBAL.TIME_ZONE")) &&
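A note on the version check rewritten above: mysql_get_server_version()
returns the server version packed as major*10000 + minor*100 + patch,
so dividing by 10000 recovers the major version even when it has two
digits. The old code switched on the first character of the version
string, which would misclassify a "10.0.x" master as version 1. A small
self-contained sketch of the arithmetic:

    #include <cassert>

    // mysql_get_server_version() packs "X.Y.Z" as X*10000 + Y*100 + Z.
    static unsigned long pack_version(unsigned x, unsigned y, unsigned z)
    {
      return x * 10000UL + y * 100UL + z;
    }

    int main()
    {
      assert(pack_version(5, 5, 28) / 10000 == 5);
      // A two-digit major parses correctly, unlike the old
      // first-character test ('1' would have meant "version 1").
      assert(pack_version(10, 0, 1) / 10000 == 10);
      return 0;
    }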
@@ -1747,6 +1750,39 @@ past_checksum:
}
}
}
+
+ /* Announce MariaDB slave capabilities. */
+ DBUG_EXECUTE_IF("simulate_slave_capability_none", goto after_set_capability;);
+ {
+ int rc= DBUG_EVALUATE_IF("simulate_slave_capability_old_53",
+ mysql_real_query(mysql, STRING_WITH_LEN("SET @mariadb_slave_capability="
+ STRINGIFY_ARG(MARIA_SLAVE_CAPABILITY_ANNOTATE))),
+ mysql_real_query(mysql, STRING_WITH_LEN("SET @mariadb_slave_capability="
+ STRINGIFY_ARG(MARIA_SLAVE_CAPABILITY_MINE))));
+ if (rc)
+ {
+ err_code= mysql_errno(mysql);
+ if (is_network_error(err_code))
+ {
+ mi->report(ERROR_LEVEL, err_code,
+ "Setting @mariadb_slave_capability failed with error: %s",
+ mysql_error(mysql));
+ goto network_err;
+ }
+ else
+ {
+ /* Fatal error */
+ errmsg= "The slave I/O thread stops because a fatal error is "
+ "encountered when it tries to set @mariadb_slave_capability.";
+ sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql));
+ goto err;
+ }
+ }
+ }
+#ifndef DBUG_OFF
+after_set_capability:
+#endif
+
err:
if (errmsg)
{
@@ -1829,9 +1865,9 @@ Waiting for the slave SQL thread to free enough relay log space");
#endif
if (rli->sql_force_rotate_relay)
{
- mysql_mutex_lock(&active_mi->data_lock);
+ mysql_mutex_lock(&mi->data_lock);
rotate_relay_log(rli->mi);
- mysql_mutex_unlock(&active_mi->data_lock);
+ mysql_mutex_unlock(&mi->data_lock);
rli->sql_force_rotate_relay= false;
}
@@ -1985,15 +2021,36 @@ int register_slave_on_master(MYSQL* mysql, Master_info *mi,
@retval FALSE success
@retval TRUE failure
*/
-bool show_master_info(THD* thd, Master_info* mi)
+
+bool show_master_info(THD *thd, Master_info *mi, bool full)
+{
+ DBUG_ENTER("show_master_info");
+
+ if (send_show_master_info_header(thd, full))
+ DBUG_RETURN(TRUE);
+ if (send_show_master_info_data(thd, mi, full))
+ DBUG_RETURN(TRUE);
+ my_eof(thd);
+ DBUG_RETURN(FALSE);
+}
+
+static bool send_show_master_info_header(THD *thd, bool full)
{
- // TODO: fix this for multi-master
List<Item> field_list;
Protocol *protocol= thd->protocol;
- DBUG_ENTER("show_master_info");
+ Master_info *mi;
+ DBUG_ENTER("send_show_master_info_header");
+
+ if (full)
+ {
+ field_list.push_back(new Item_empty_string("Connection_name",
+ MAX_CONNECTION_NAME));
+ field_list.push_back(new Item_empty_string("Slave_SQL_State",
+ 30));
+ }
field_list.push_back(new Item_empty_string("Slave_IO_State",
- 14));
+ 30));
field_list.push_back(new Item_empty_string("Master_Host",
sizeof(mi->host)));
field_list.push_back(new Item_empty_string("Master_User",
@@ -2056,22 +2113,51 @@ bool show_master_info(THD* thd, Master_info* mi)
FN_REFLEN));
field_list.push_back(new Item_return_int("Master_Server_Id", sizeof(ulong),
MYSQL_TYPE_LONG));
-
+ if (full)
+ {
+ field_list.push_back(new Item_return_int("Retried_transactions",
+ 10, MYSQL_TYPE_LONG));
+ field_list.push_back(new Item_return_int("Max_relay_log_size",
+ 10, MYSQL_TYPE_LONGLONG));
+ field_list.push_back(new Item_return_int("Executed_log_entries",
+ 10, MYSQL_TYPE_LONG));
+ field_list.push_back(new Item_return_int("Slave_received_heartbeats",
+ 10, MYSQL_TYPE_LONG));
+ field_list.push_back(new Item_float("Slave_heartbeat_period",
+ 0.0, 3, 10));
+ }
if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
+ DBUG_RETURN(FALSE);
+}
+
+
+static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full)
+{
+ DBUG_ENTER("send_show_master_info_data");
if (mi->host[0])
{
DBUG_PRINT("info",("host is set: '%s'", mi->host));
String *packet= &thd->packet;
+ Protocol *protocol= thd->protocol;
+ char buf[256];
+ String tmp(buf, sizeof(buf), &my_charset_bin);
+
protocol->prepare_for_resend();
/*
slave_running can be accessed without run_lock but not other
non-volatile members like mi->io_thd, which is guarded by the mutex.
*/
+ if (full)
+ protocol->store(mi->connection_name.str, mi->connection_name.length,
+ &my_charset_bin);
mysql_mutex_lock(&mi->run_lock);
+ if (full)
+ protocol->store(mi->rli.sql_thd ? mi->rli.sql_thd->proc_info : "",
+ &my_charset_bin);
protocol->store(mi->io_thd ? mi->io_thd->proc_info : "", &my_charset_bin);
mysql_mutex_unlock(&mi->run_lock);
@@ -2097,8 +2183,6 @@ bool show_master_info(THD* thd, Master_info* mi)
protocol->store(rpl_filter->get_do_db());
protocol->store(rpl_filter->get_ignore_db());
- char buf[256];
- String tmp(buf, sizeof(buf), &my_charset_bin);
rpl_filter->get_do_table(&tmp);
protocol->store(&tmp);
rpl_filter->get_ignore_table(&tmp);
@@ -2204,6 +2288,14 @@ bool show_master_info(THD* thd, Master_info* mi)
}
// Master_Server_id
protocol->store((uint32) mi->master_id);
+ if (full)
+ {
+ protocol->store((uint32) mi->rli.retried_trans);
+ protocol->store((ulonglong) mi->rli.max_relay_log_size);
+ protocol->store((uint32) mi->rli.executed_entries);
+ protocol->store((uint32) mi->received_heartbeats);
+ protocol->store((double) mi->heartbeat_period, 3, &tmp);
+ }
mysql_mutex_unlock(&mi->rli.err_lock);
mysql_mutex_unlock(&mi->err_lock);
@@ -2213,6 +2305,69 @@ bool show_master_info(THD* thd, Master_info* mi)
if (my_net_write(&thd->net, (uchar*) thd->packet.ptr(), packet->length()))
DBUG_RETURN(TRUE);
}
+ DBUG_RETURN(FALSE);
+}
+
+
+/* Used to sort connections by name */
+
+static int cmp_mi_by_name(const Master_info **arg1,
+ const Master_info **arg2)
+{
+ return my_strcasecmp(system_charset_info, (*arg1)->connection_name.str,
+ (*arg2)->connection_name.str);
+}
+
+
+/**
+ Execute a SHOW FULL SLAVE STATUS statement.
+
+ @param thd Pointer to THD object for the client thread executing the
+ statement.
+
+ Elements are sorted according to the original connection_name.
+
+ @retval FALSE success
+ @retval TRUE failure
+
+ @note
+ master_info_index is protected by LOCK_active_mi.
+*/
+
+bool show_all_master_info(THD* thd)
+{
+ uint i, elements;
+ Master_info **tmp;
+ DBUG_ENTER("show_all_master_info");
+ mysql_mutex_assert_owner(&LOCK_active_mi);
+
+ if (send_show_master_info_header(thd, 1))
+ DBUG_RETURN(TRUE);
+
+ if (!(elements= master_info_index->master_info_hash.records))
+ goto end;
+
+ /*
+ Sort lines to get them into a predicted order
+ (needed for test cases and to not confuse users)
+ */
+ if (!(tmp= (Master_info**) thd->alloc(sizeof(Master_info*) * elements)))
+ DBUG_RETURN(TRUE);
+
+ for (i= 0; i < elements; i++)
+ {
+ tmp[i]= (Master_info *) my_hash_element(&master_info_index->
+ master_info_hash, i);
+ }
+ my_qsort(tmp, elements, sizeof(Master_info*), (qsort_cmp) cmp_mi_by_name);
+
+ for (i= 0; i < elements; i++)
+ {
+ if (send_show_master_info_data(thd, tmp[i], 1))
+ DBUG_RETURN(TRUE);
+ }
+
+end:
my_eof(thd);
DBUG_RETURN(FALSE);
}
@@ -2266,23 +2421,35 @@ void set_slave_thread_default_charset(THD* thd, Relay_log_info const *rli)
init_slave_thread()
*/
-static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type)
+static int init_slave_thread(THD* thd, Master_info *mi,
+ SLAVE_THD_TYPE thd_type)
{
DBUG_ENTER("init_slave_thread");
-#if !defined(DBUG_OFF)
- int simulate_error= 0;
-#endif
+ int simulate_error __attribute__((unused))= 0;
+ DBUG_EXECUTE_IF("simulate_io_slave_error_on_init",
+ simulate_error|= (1 << SLAVE_THD_IO););
+ DBUG_EXECUTE_IF("simulate_sql_slave_error_on_init",
+ simulate_error|= (1 << SLAVE_THD_SQL););
+ /* We must call store_globals() before doing my_net_init() */
+ if (init_thr_lock() || thd->store_globals() ||
+ my_net_init(&thd->net, 0, MYF(MY_THREAD_SPECIFIC)) ||
+ IF_DBUG(simulate_error & (1<< thd_type), 0))
+ {
+ thd->cleanup();
+ DBUG_RETURN(-1);
+ }
+
thd->system_thread = (thd_type == SLAVE_THD_SQL) ?
SYSTEM_THREAD_SLAVE_SQL : SYSTEM_THREAD_SLAVE_IO;
thd->security_ctx->skip_grants();
- my_net_init(&thd->net, 0);
/*
Adding MAX_LOG_EVENT_HEADER_LEN to the max_allowed_packet on all
slave threads, since a replication event can become this much larger
than the corresponding packet (query) sent from client to master.
*/
thd->variables.max_allowed_packet= slave_max_allowed_packet;
- thd->slave_thread = 1;
+ thd->slave_thread= 1;
+ thd->connection_name= mi->connection_name;
thd->enable_slow_log= opt_log_slow_slave_statements;
thd->variables.log_slow_filter= global_system_variables.log_slow_filter;
set_slave_thread_options(thd);
@@ -2291,17 +2458,6 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type)
thd->thread_id= thd->variables.pseudo_thread_id= thread_id++;
mysql_mutex_unlock(&LOCK_thread_count);
- DBUG_EXECUTE_IF("simulate_io_slave_error_on_init",
- simulate_error|= (1 << SLAVE_THD_IO););
- DBUG_EXECUTE_IF("simulate_sql_slave_error_on_init",
- simulate_error|= (1 << SLAVE_THD_SQL););
- if (init_thr_lock() || thd->store_globals() ||
- IF_DBUG(simulate_error & (1<< thd_type), 0))
- {
- thd->cleanup();
- DBUG_RETURN(-1);
- }
-
if (thd_type == SLAVE_THD_SQL)
thd_proc_info(thd, "Waiting for the next event in relay log");
else
@@ -2598,7 +2754,10 @@ int apply_event_and_update_pos(Log_event* ev, THD* thd, Relay_log_info* rli)
int reason= ev->shall_skip(rli);
if (reason == Log_event::EVENT_SKIP_COUNT)
- sql_slave_skip_counter= --rli->slave_skip_counter;
+ {
+ DBUG_ASSERT(rli->slave_skip_counter > 0);
+ rli->slave_skip_counter--;
+ }
mysql_mutex_unlock(&rli->data_lock);
if (reason == Log_event::EVENT_SKIP_NOT)
exec_res= ev->apply_event(rli);
@@ -2847,6 +3006,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli)
mysql_mutex_lock(&rli->data_lock); // because of SHOW STATUS
rli->trans_retries++;
rli->retried_trans++;
+ statistic_increment(slave_retried_transactions, LOCK_status);
mysql_mutex_unlock(&rli->data_lock);
DBUG_PRINT("info", ("Slave retries transaction "
"rli->trans_retries: %lu", rli->trans_retries));
@@ -3021,7 +3181,7 @@ pthread_handler_t handle_slave_io(void *arg)
pthread_detach_this_thread();
thd->thread_stack= (char*) &thd; // remember where our stack is
mi->clear_error();
- if (init_slave_thread(thd, SLAVE_THD_IO))
+ if (init_slave_thread(thd, mi, SLAVE_THD_IO))
{
mysql_cond_broadcast(&mi->start_cond);
sql_print_error("Failed during slave I/O thread initialization");
@@ -3030,7 +3190,7 @@ pthread_handler_t handle_slave_io(void *arg)
mysql_mutex_lock(&LOCK_thread_count);
threads.append(thd);
mysql_mutex_unlock(&LOCK_thread_count);
- mi->slave_running = 1;
+ mi->slave_running = MYSQL_SLAVE_RUN_NOT_CONNECT;
mi->abort_slave = 0;
mysql_mutex_unlock(&mi->run_lock);
mysql_cond_broadcast(&mi->start_cond);
@@ -3335,14 +3495,12 @@ err_during_init:
mi->rli.relay_log.description_event_for_queue= 0;
// TODO: make rpl_status part of Master_info
change_rpl_status(RPL_ACTIVE_SLAVE,RPL_IDLE_SLAVE);
- DBUG_ASSERT(thd->net.buff != 0);
- net_end(&thd->net); // destructor will not free it, because net.vio is 0
mysql_mutex_lock(&LOCK_thread_count);
THD_CHECK_SENTRY(thd);
delete thd;
mysql_mutex_unlock(&LOCK_thread_count);
mi->abort_slave= 0;
- mi->slave_running= 0;
+ mi->slave_running= MYSQL_SLAVE_NOT_RUN;
mi->io_thd= 0;
/*
Note: the order of the two following calls (first broadcast, then unlock)
@@ -3420,7 +3578,8 @@ pthread_handler_t handle_slave_sql(void *arg)
my_off_t UNINIT_VAR(saved_log_pos);
my_off_t UNINIT_VAR(saved_master_log_pos);
my_off_t saved_skip= 0;
- Relay_log_info* rli = &((Master_info*)arg)->rli;
+ Master_info *mi= ((Master_info*)arg);
+ Relay_log_info* rli = &mi->rli;
const char *errmsg;
// needs to call my_thread_init(), otherwise we get a coredump in DBUG_ stuff
@@ -3434,6 +3593,7 @@ pthread_handler_t handle_slave_sql(void *arg)
thd->thread_stack = (char*)&thd; // remember where our stack is
DBUG_ASSERT(rli->inited);
+ DBUG_ASSERT(rli->mi == mi);
mysql_mutex_lock(&rli->run_lock);
DBUG_ASSERT(!rli->slave_running);
errmsg= 0;
@@ -3445,10 +3605,10 @@ pthread_handler_t handle_slave_sql(void *arg)
/* Inform waiting threads that slave has started */
rli->slave_run_id++;
- rli->slave_running = 1;
+ rli->slave_running= MYSQL_SLAVE_RUN_NOT_CONNECT;
pthread_detach_this_thread();
- if (init_slave_thread(thd, SLAVE_THD_SQL))
+ if (init_slave_thread(thd, mi, SLAVE_THD_SQL))
{
/*
TODO: this is currently broken - slave start and change master
@@ -3689,6 +3849,7 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \
}
goto err;
}
+ rli->executed_entries++;
}
/* Thread stopped. Print the current replication position to the log */
@@ -3719,9 +3880,9 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \
err_during_init:
/* We need data_lock, at least to wake up any waiting master_pos_wait() */
mysql_mutex_lock(&rli->data_lock);
- DBUG_ASSERT(rli->slave_running == 1); // tracking buffer overrun
+ DBUG_ASSERT(rli->slave_running == MYSQL_SLAVE_RUN_NOT_CONNECT); // tracking buffer overrun
/* When master_pos_wait() wakes up it will check this and terminate */
- rli->slave_running= 0;
+ rli->slave_running= MYSQL_SLAVE_NOT_RUN;
/* Forget the relay log's format */
delete rli->relay_log.description_event_for_exec;
rli->relay_log.description_event_for_exec= 0;
@@ -3739,8 +3900,6 @@ err_during_init:
to avoid unneeded position re-init
*/
thd->temporary_tables = 0; // remove temptation from destructor to close them
- DBUG_ASSERT(thd->net.buff != 0);
- net_end(&thd->net); // destructor will not free it, because we are weird
DBUG_ASSERT(rli->sql_thd == thd);
THD_CHECK_SENTRY(thd);
rli->sql_thd= 0;
@@ -5236,11 +5395,6 @@ static Log_event* next_event(Relay_log_info* rli)
mysql_mutex_lock(log_lock);
if (rli->relay_log.is_active(rli->linfo.log_file_name))
{
-#ifdef EXTRA_DEBUG
- if (global_system_variables.log_warnings)
- sql_print_information("next log '%s' is currently active",
- rli->linfo.log_file_name);
-#endif
rli->cur_log= cur_log= rli->relay_log.get_log_file();
rli->cur_log_old_open_count= rli->relay_log.get_open_count();
DBUG_ASSERT(rli->cur_log_fd == -1);
@@ -5323,11 +5477,6 @@ static Log_event* next_event(Relay_log_info* rli)
ourselves. We are sure that the log is still not hot now (a log can get
from hot to cold, but not from cold to hot). No need for LOCK_log.
*/
-#ifdef EXTRA_DEBUG
- if (global_system_variables.log_warnings)
- sql_print_information("next log '%s' is not active",
- rli->linfo.log_file_name);
-#endif
// open_binlog() will check the magic header
if ((rli->cur_log_fd=open_binlog(cur_log,rli->linfo.log_file_name,
&errmsg)) <0)
@@ -5507,20 +5656,14 @@ bool rpl_master_has_bug(const Relay_log_info *rli, uint bug_id, bool report,
*/
bool rpl_master_erroneous_autoinc(THD *thd)
{
- if (active_mi && active_mi->rli.sql_thd == thd)
+ if (thd->rli_slave)
{
- Relay_log_info *rli= &active_mi->rli;
DBUG_EXECUTE_IF("simulate_bug33029", return TRUE;);
- return rpl_master_has_bug(rli, 33029, FALSE, NULL, NULL);
+ return rpl_master_has_bug(thd->rli_slave, 33029, FALSE, NULL, NULL);
}
return FALSE;
}
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-template class I_List_iterator<i_string>;
-template class I_List_iterator<i_string_pair>;
-#endif
-
/**
@} (end of group Replication)
*/
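The new show_all_master_info() path above snapshots every Master_info
from the hash into an array and sorts it before sending rows, so SHOW
output is deterministic across runs. A stand-alone sketch of that
ordering step (std::sort and POSIX strcasecmp() stand in for my_qsort()
and the charset-aware my_strcasecmp() used by cmp_mi_by_name()):

    #include <algorithm>
    #include <cstdio>
    #include <strings.h>   // strcasecmp (POSIX)
    #include <vector>

    struct Master_info_stub { const char *connection_name; };

    int main()
    {
      // Snapshot of the connection hash, then a case-insensitive
      // sort by connection name, mirroring cmp_mi_by_name().
      std::vector<Master_info_stub> mi= { {"M2"}, {""}, {"m1"} };
      std::sort(mi.begin(), mi.end(),
                [](const Master_info_stub &a, const Master_info_stub &b)
                { return strcasecmp(a.connection_name,
                                    b.connection_name) < 0; });
      for (const Master_info_stub &m : mi)
        printf("'%s'\n", m.connection_name);   // '', 'm1', 'M2'
      return 0;
    }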
diff --git a/sql/slave.h b/sql/slave.h
index 6b4bcffe109..565f40b7236 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -45,9 +45,12 @@
#define MAX_SLAVE_ERROR 2000
+#define MAX_REPLICATION_THREAD 64
+
// Forward declarations
class Relay_log_info;
class Master_info;
+class Master_info_index;
int init_intvar_from_file(int* var, IO_CACHE* f, int default_val);
int init_strvar_from_file(char *var, int max_size, IO_CACHE *f,
@@ -197,7 +200,8 @@ int mysql_table_dump(THD* thd, const char* db,
int fetch_master_table(THD* thd, const char* db_name, const char* table_name,
Master_info* mi, MYSQL* mysql, bool overwrite);
-bool show_master_info(THD* thd, Master_info* mi);
+bool show_master_info(THD* thd, Master_info* mi, bool full);
+bool show_all_master_info(THD* thd);
bool show_binlog_info(THD* thd);
bool rpl_master_has_bug(const Relay_log_info *rli, uint bug_id, bool report,
bool (*pred)(const void *), const void *param);
@@ -231,6 +235,9 @@ bool net_request_file(NET* net, const char* fname);
extern bool volatile abort_loop;
extern Master_info main_mi, *active_mi; /* active_mi for multi-master */
+extern Master_info *default_master_info; /* To replace active_mi */
+extern Master_info_index *master_info_index;
+extern LEX_STRING default_master_connection_name;
extern LIST master_list;
extern my_bool replicate_same_server_id;
diff --git a/sql/sp.cc b/sql/sp.cc
index 726d70ffe2a..b80fa968483 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -984,9 +984,6 @@ sp_create_routine(THD *thd, stored_procedure_type type, sp_head *sp)
enum_check_fields saved_count_cuted_fields;
bool store_failed= FALSE;
-
- bool save_binlog_row_based;
-
DBUG_ENTER("sp_create_routine");
DBUG_PRINT("enter", ("type: %d name: %.*s", (int) type,
(int) sp->m_name.length,
@@ -1004,14 +1001,6 @@ sp_create_routine(THD *thd, stored_procedure_type type, sp_head *sp)
/* Reset sql_mode during data dictionary operations. */
thd->variables.sql_mode= 0;
- /*
- This statement will be replicated as a statement, even when using
- row-based replication. The flag will be reset at the end of the
- statement.
- */
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
-
saved_count_cuted_fields= thd->count_cuted_fields;
thd->count_cuted_fields= CHECK_FIELD_WARN;
@@ -1217,10 +1206,7 @@ sp_create_routine(THD *thd, stored_procedure_type type, sp_head *sp)
done:
thd->count_cuted_fields= saved_count_cuted_fields;
thd->variables.sql_mode= saved_mode;
- /* Restore the state of binlog format */
DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(ret);
}
@@ -1245,7 +1231,6 @@ sp_drop_routine(THD *thd, stored_procedure_type type, sp_name *name)
{
TABLE *table;
int ret;
- bool save_binlog_row_based;
MDL_key::enum_mdl_namespace mdl_type= type == TYPE_ENUM_FUNCTION ?
MDL_key::FUNCTION : MDL_key::PROCEDURE;
DBUG_ENTER("sp_drop_routine");
@@ -1267,9 +1252,6 @@ sp_drop_routine(THD *thd, stored_procedure_type type, sp_name *name)
row-based replication. The flag will be reset at the end of the
statement.
*/
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
-
if ((ret= db_find_routine_aux(thd, type, name, table)) == SP_OK)
{
if (table->file->ha_delete_row(table->record[0]))
@@ -1296,10 +1278,7 @@ sp_drop_routine(THD *thd, stored_procedure_type type, sp_name *name)
sp_cache_flush_obsolete(spc, &sp);
}
}
- /* Restore the state of binlog format */
DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(ret);
}
@@ -1327,7 +1306,6 @@ sp_update_routine(THD *thd, stored_procedure_type type, sp_name *name,
{
TABLE *table;
int ret;
- bool save_binlog_row_based;
MDL_key::enum_mdl_namespace mdl_type= type == TYPE_ENUM_FUNCTION ?
MDL_key::FUNCTION : MDL_key::PROCEDURE;
DBUG_ENTER("sp_update_routine");
@@ -1345,14 +1323,6 @@ sp_update_routine(THD *thd, stored_procedure_type type, sp_name *name,
if (!(table= open_proc_table_for_update(thd)))
DBUG_RETURN(SP_OPEN_TABLE_FAILED);
- /*
- This statement will be replicated as a statement, even when using
- row-based replication. The flag will be reset at the end of the
- statement.
- */
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
-
if ((ret= db_find_routine_aux(thd, type, name, table)) == SP_OK)
{
if (type == TYPE_ENUM_FUNCTION && ! trust_function_creators &&
@@ -1380,7 +1350,6 @@ sp_update_routine(THD *thd, stored_procedure_type type, sp_name *name,
}
store_record(table,record[1]);
- table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
((Field_timestamp *)table->field[MYSQL_PROC_FIELD_MODIFIED])->set_time();
if (chistics->suid != SP_IS_DEFAULT_SUID)
table->field[MYSQL_PROC_FIELD_SECURITY_TYPE]->
@@ -1406,10 +1375,7 @@ sp_update_routine(THD *thd, stored_procedure_type type, sp_name *name,
sp_cache_invalidate();
}
err:
- /* Restore the state of binlog format */
DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(ret);
}
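The sp.cc hunks above drop the per-function save_binlog_row_based
juggling; statement-vs-row handling is now asserted, or, where a caller
still needs the guard, done through a paired setter and restore, as
change_password() in sql_acl.cc below shows. A minimal sketch of that
pattern with a stubbed THD (only the two methods named in the diff are
modelled; the real ones live on class THD):

    #include <cassert>

    enum enum_binlog_format { BINLOG_FORMAT_STMT, BINLOG_FORMAT_ROW };

    struct THD                          // stub for illustration
    {
      enum_binlog_format fmt= BINLOG_FORMAT_ROW;
      enum_binlog_format set_current_stmt_binlog_format_stmt()
      {
        enum_binlog_format old= fmt;
        fmt= BINLOG_FORMAT_STMT;        // force statement logging
        return old;                     // caller restores this later
      }
      void restore_stmt_binlog_format(enum_binlog_format old)
      { fmt= old; }
    };

    int main()
    {
      THD thd;
      enum_binlog_format save= thd.set_current_stmt_binlog_format_stmt();
      /* ... write the ACL change to the binlog as a statement ... */
      thd.restore_stmt_binlog_format(save);
      assert(thd.fmt == BINLOG_FORMAT_ROW);
      return 0;
    }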
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index be6dc935dcc..cb9c0325845 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -218,6 +218,7 @@ sp_get_flags_for_command(LEX *lex)
case SQLCOM_SHOW_CREATE_TRIGGER:
case SQLCOM_SHOW_DATABASES:
case SQLCOM_SHOW_ERRORS:
+ case SQLCOM_SHOW_EXPLAIN:
case SQLCOM_SHOW_FIELDS:
case SQLCOM_SHOW_FUNC_CODE:
case SQLCOM_SHOW_GRANTS:
@@ -509,7 +510,7 @@ sp_head::operator new(size_t size) throw()
MEM_ROOT own_root;
sp_head *sp;
- init_sql_alloc(&own_root, MEM_ROOT_BLOCK_SIZE, MEM_ROOT_PREALLOC);
+ init_sql_alloc(&own_root, MEM_ROOT_BLOCK_SIZE, MEM_ROOT_PREALLOC, MYF(0));
sp= (sp_head *) alloc_root(&own_root, size);
if (sp == NULL)
DBUG_RETURN(NULL);
@@ -593,7 +594,7 @@ sp_head::init(LEX *lex)
types of stored procedures to simplify reset_lex()/restore_lex() code.
*/
lex->trg_table_fields.empty();
- my_init_dynamic_array(&m_instr, sizeof(sp_instr *), 16, 8);
+ my_init_dynamic_array(&m_instr, sizeof(sp_instr *), 16, 8, MYF(0));
m_param_begin= NULL;
m_param_end= NULL;
@@ -1253,7 +1254,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
DBUG_RETURN(TRUE);
/* init per-instruction memroot */
- init_sql_alloc(&execute_mem_root, MEM_ROOT_BLOCK_SIZE, 0);
+ init_sql_alloc(&execute_mem_root, MEM_ROOT_BLOCK_SIZE, 0, MYF(0));
DBUG_ASSERT(!(m_flags & IS_INVOKED));
m_flags|= IS_INVOKED;
@@ -1710,7 +1711,7 @@ sp_head::execute_trigger(THD *thd,
TODO: we should create sp_rcontext once per command and reuse it
on subsequent executions of a trigger.
*/
- init_sql_alloc(&call_mem_root, MEM_ROOT_BLOCK_SIZE, 0);
+ init_sql_alloc(&call_mem_root, MEM_ROOT_BLOCK_SIZE, 0, MYF(0));
thd->set_n_backup_active_arena(&call_arena, &backup_arena);
if (!(nctx= new sp_rcontext(m_pcont, 0, octx)) ||
@@ -1827,7 +1828,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount,
TODO: we should create sp_rcontext once per command and reuse
it on subsequent executions of a function/trigger.
*/
- init_sql_alloc(&call_mem_root, MEM_ROOT_BLOCK_SIZE, 0);
+ init_sql_alloc(&call_mem_root, MEM_ROOT_BLOCK_SIZE, 0, MYF(0));
thd->set_n_backup_active_arena(&call_arena, &backup_arena);
if (!(nctx= new sp_rcontext(m_pcont, return_value_fld, octx)) ||
@@ -2426,7 +2427,6 @@ sp_head::fill_field_definition(THD *thd, LEX *lex,
{
LEX_STRING cmt = { 0, 0 };
uint unused1= 0;
- int unused2= 0;
if (field_def->init(thd, (char*) "", field_type, lex->length, lex->dec,
lex->type, (Item*) 0, (Item*) 0, &cmt, 0,
@@ -2443,8 +2443,7 @@ sp_head::fill_field_definition(THD *thd, LEX *lex,
sp_prepare_create_field(thd, field_def);
- if (prepare_create_field(field_def, &unused1, &unused2, &unused2,
- HA_CAN_GEOMETRY))
+ if (prepare_create_field(field_def, &unused1, HA_CAN_GEOMETRY))
{
return TRUE;
}
diff --git a/sql/sp_pcontext.cc b/sql/sp_pcontext.cc
index 4c5087eaf27..f11daeecb7b 100644
--- a/sql/sp_pcontext.cc
+++ b/sql/sp_pcontext.cc
@@ -61,20 +61,20 @@ sp_pcontext::sp_pcontext()
m_label_scope(LABEL_DEFAULT_SCOPE)
{
(void) my_init_dynamic_array(&m_vars, sizeof(sp_variable_t *),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC);
+ PCONTEXT_ARRAY_INIT_ALLOC,
+ PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
(void) my_init_dynamic_array(&m_case_expr_id_lst, sizeof(int),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC);
+ PCONTEXT_ARRAY_INIT_ALLOC,
+ PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
(void) my_init_dynamic_array(&m_conds, sizeof(sp_cond_type_t *),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC);
+ PCONTEXT_ARRAY_INIT_ALLOC,
+ PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
(void) my_init_dynamic_array(&m_cursors, sizeof(LEX_STRING),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC);
+ PCONTEXT_ARRAY_INIT_ALLOC,
+ PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
(void) my_init_dynamic_array(&m_handlers, sizeof(sp_cond_type_t *),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC);
+ PCONTEXT_ARRAY_INIT_ALLOC,
+ PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
m_label.empty();
m_children.empty();
@@ -89,20 +89,20 @@ sp_pcontext::sp_pcontext(sp_pcontext *prev, label_scope_type label_scope)
m_label_scope(label_scope)
{
(void) my_init_dynamic_array(&m_vars, sizeof(sp_variable_t *),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC);
+ PCONTEXT_ARRAY_INIT_ALLOC,
+ PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
(void) my_init_dynamic_array(&m_case_expr_id_lst, sizeof(int),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC);
+ PCONTEXT_ARRAY_INIT_ALLOC,
+ PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
(void) my_init_dynamic_array(&m_conds, sizeof(sp_cond_type_t *),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC);
+ PCONTEXT_ARRAY_INIT_ALLOC,
+ PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
(void) my_init_dynamic_array(&m_cursors, sizeof(LEX_STRING),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC);
+ PCONTEXT_ARRAY_INIT_ALLOC,
+ PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
(void) my_init_dynamic_array(&m_handlers, sizeof(sp_cond_type_t *),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC);
+ PCONTEXT_ARRAY_INIT_ALLOC,
+ PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
m_label.empty();
m_children.empty();
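The recurring MYF(0) additions in this and the surrounding files track
a mysys signature change: init_sql_alloc() and my_init_dynamic_array()
now take a my_flags argument, so callers can request default behaviour
with MYF(0) or mark memory as thread-specific (as the my_net_init()
call in slave.cc does with MYF(MY_THREAD_SPECIFIC)). A toy sketch of
threading an allocation flag through an allocator (names illustrative,
not the mysys API):

    #include <cstdio>
    #include <cstdlib>

    typedef unsigned long myf_t;                // illustrative flag type
    static const myf_t MYF_NONE= 0;             // stand-in for MYF(0)
    static const myf_t MYF_THREAD_SPECIFIC= 1;  // stand-in flag

    static void *toy_alloc(size_t size, myf_t flags)
    {
      // A real allocator would charge thread-specific memory to the
      // current THD's accounting; here we only report the intent.
      if (flags & MYF_THREAD_SPECIFIC)
        fprintf(stderr, "thread-specific allocation of %zu bytes\n", size);
      return malloc(size);
    }

    int main()
    {
      void *p= toy_alloc(64, MYF_NONE);
      void *q= toy_alloc(64, MYF_THREAD_SPECIFIC);
      free(p);
      free(q);
      return 0;
    }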
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 04d797b3ba1..2f854251ce5 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -701,7 +701,7 @@ my_bool acl_init(bool dont_read_acl_tables)
return_val= acl_reload(thd);
delete thd;
/* Remember that we don't have a THD */
- my_pthread_setspecific_ptr(THR_THD, 0);
+ set_current_thd(0);
DBUG_RETURN(return_val);
}
@@ -762,13 +762,13 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
acl_cache->clear(1); // Clear locked hostname cache
- init_sql_alloc(&mem, ACL_ALLOC_BLOCK_SIZE, 0);
+ init_sql_alloc(&mem, ACL_ALLOC_BLOCK_SIZE, 0, MYF(0));
if (init_read_record(&read_record_info,thd,table= tables[0].table,NULL,1,0,
FALSE))
goto end;
table->use_all_columns();
- (void) my_init_dynamic_array(&acl_hosts,sizeof(ACL_HOST),20,50);
+ (void) my_init_dynamic_array(&acl_hosts,sizeof(ACL_HOST), 20, 50, MYF(0));
while (!(read_record_info.read_record(&read_record_info)))
{
ACL_HOST host;
@@ -825,7 +825,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
goto end;
table->use_all_columns();
- (void) my_init_dynamic_array(&acl_users,sizeof(ACL_USER),50,100);
+ (void) my_init_dynamic_array(&acl_users,sizeof(ACL_USER), 50, 100, MYF(0));
password_length= table->field[2]->field_length /
table->field[2]->charset()->mbmaxlen;
if (password_length < SCRAMBLED_PASSWORD_CHAR_LENGTH_323)
@@ -1027,7 +1027,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
goto end;
table->use_all_columns();
- (void) my_init_dynamic_array(&acl_dbs,sizeof(ACL_DB),50,100);
+ (void) my_init_dynamic_array(&acl_dbs,sizeof(ACL_DB), 50, 100, MYF(0));
while (!(read_record_info.read_record(&read_record_info)))
{
ACL_DB db;
@@ -1090,7 +1090,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
freeze_size(&acl_dbs);
(void) my_init_dynamic_array(&acl_proxy_users, sizeof(ACL_PROXY_USER),
- 50, 100);
+ 50, 100, MYF(0));
if (tables[3].table)
{
init_read_record(&read_record_info, thd, table= tables[3].table, NULL, 1,
@@ -1745,7 +1745,7 @@ static void init_check_host(void)
{
DBUG_ENTER("init_check_host");
(void) my_init_dynamic_array(&acl_wild_hosts,sizeof(struct acl_host_and_ip),
- acl_users.elements,1);
+ acl_users.elements, 1, MYF(0));
(void) my_hash_init(&acl_check_hosts,system_charset_info,
acl_users.elements, 0, 0,
(my_hash_get_key) check_get_key, 0, 0);
@@ -1906,7 +1906,7 @@ bool change_password(THD *thd, const char *host, const char *user,
/* Buffer should be extended when password length is extended. */
char buff[512];
ulong query_length;
- bool save_binlog_row_based;
+ enum_binlog_format save_binlog_format;
uint new_password_len= (uint) strlen(new_password);
bool result= 1;
DBUG_ENTER("change_password");
@@ -1943,9 +1943,10 @@ bool change_password(THD *thd, const char *host, const char *user,
This statement will be replicated as a statement, even when using
row-based replication. The flag will be reset at the end of the
statement.
+ This has to be handled here as it's called by set_var.cc, which is
+ not automatically handled by sql_parse.cc
*/
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
+ save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
mysql_mutex_lock(&acl_cache->lock);
ACL_USER *acl_user;
@@ -1994,11 +1995,7 @@ bool change_password(THD *thd, const char *host, const char *user,
}
end:
close_mysql_tables(thd);
-
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
+ thd->restore_stmt_binlog_format(save_binlog_format);
DBUG_RETURN(result);
}
@@ -3594,7 +3591,6 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
TABLE_LIST tables[3];
bool create_new_users=0;
char *db_name, *table_name;
- bool save_binlog_row_based;
DBUG_ENTER("mysql_table_grant");
if (!initialized)
@@ -3684,14 +3680,6 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
if (column_priv || (revoke_grant && ((rights & COL_ACLS) || columns.elements)))
tables[1].next_local= tables[1].next_global= tables+2;
- /*
- This statement will be replicated as a statement, even when using
- row-based replication. The flag will be reset at the end of the
- statement.
- */
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
-
#ifdef HAVE_REPLICATION
/*
GRANT and REVOKE are applied the slave in/exclusion rules as they are
@@ -3705,13 +3693,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
*/
tables[0].updating= tables[1].updating= tables[2].updating= 1;
if (!(thd->spcont || rpl_filter->tables_ok(0, tables)))
- {
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(FALSE);
- }
}
#endif
@@ -3729,11 +3711,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
thd->lex->sql_command= backup.sql_command;
if (open_and_lock_tables(thd, tables, FALSE, MYSQL_LOCK_IGNORE_TIMEOUT))
{ // Should never happen
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
thd->lex->restore_backup_query_tables_list(&backup);
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(TRUE); /* purecov: deadcode */
}
@@ -3861,9 +3839,6 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
/* Tables are automatically closed */
thd->lex->restore_backup_query_tables_list(&backup);
/* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(result);
}
@@ -3892,7 +3867,6 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc,
TABLE_LIST tables[2];
bool create_new_users=0, result=0;
char *db_name, *table_name;
- bool save_binlog_row_based;
DBUG_ENTER("mysql_routine_grant");
if (!initialized)
@@ -3922,14 +3896,6 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc,
C_STRING_WITH_LEN("procs_priv"), "procs_priv", TL_WRITE);
tables[0].next_local= tables[0].next_global= tables+1;
- /*
- This statement will be replicated as a statement, even when using
- row-based replication. The flag will be reset at the end of the
- statement.
- */
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
-
#ifdef HAVE_REPLICATION
/*
GRANT and REVOKE are applied the slave in/exclusion rules as they are
@@ -3944,23 +3910,15 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc,
tables[0].updating= tables[1].updating= 1;
if (!(thd->spcont || rpl_filter->tables_ok(0, tables)))
{
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(FALSE);
}
}
#endif
if (open_and_lock_tables(thd, tables, FALSE, MYSQL_LOCK_IGNORE_TIMEOUT))
- { // Should never happen
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(TRUE);
- }
+
+ DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
if (!revoke_grant)
create_new_users= test_if_create_new_users(thd);
@@ -4035,10 +3993,6 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc,
}
mysql_rwlock_unlock(&LOCK_grant);
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
/* Tables are automatically closed */
DBUG_RETURN(result);
@@ -4053,8 +4007,8 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
char tmp_db[SAFE_NAME_LEN+1];
bool create_new_users=0;
TABLE_LIST tables[2];
- bool save_binlog_row_based;
DBUG_ENTER("mysql_grant");
+
if (!initialized)
{
my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0),
@@ -4096,14 +4050,6 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
TL_WRITE);
tables[0].next_local= tables[0].next_global= tables+1;
- /*
- This statement will be replicated as a statement, even when using
- row-based replication. The flag will be reset at the end of the
- statement.
- */
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
-
#ifdef HAVE_REPLICATION
/*
GRANT and REVOKE are applied the slave in/exclusion rules as they are
@@ -4117,24 +4063,14 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
*/
tables[0].updating= tables[1].updating= 1;
if (!(thd->spcont || rpl_filter->tables_ok(0, tables)))
- {
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(FALSE);
- }
}
#endif
if (open_and_lock_tables(thd, tables, FALSE, MYSQL_LOCK_IGNORE_TIMEOUT))
- { // This should never happen
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(TRUE); /* purecov: deadcode */
- }
+
+ DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
if (!revoke_grant)
create_new_users= test_if_create_new_users(thd);
@@ -4198,10 +4134,6 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
if (!result)
my_ok(thd);
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(result);
}
@@ -4242,7 +4174,7 @@ my_bool grant_init()
return_val= grant_reload(thd);
delete thd;
/* Remember that we don't have a THD */
- my_pthread_setspecific_ptr(THR_THD, 0);
+ set_current_thd(0);
DBUG_RETURN(return_val);
}
@@ -4540,7 +4472,7 @@ my_bool grant_reload(THD *thd)
operation possible in case of failure.
*/
old_mem= memex;
- init_sql_alloc(&memex, ACL_ALLOC_BLOCK_SIZE, 0);
+ init_sql_alloc(&memex, ACL_ALLOC_BLOCK_SIZE, 0, MYF(0));
if ((return_val= grant_load(thd, tables)))
{ // Error. Revert to old hash
@@ -6524,26 +6456,11 @@ bool mysql_create_user(THD *thd, List <LEX_USER> &list)
List_iterator <LEX_USER> user_list(list);
TABLE_LIST tables[GRANT_TABLES];
bool some_users_created= FALSE;
- bool save_binlog_row_based;
DBUG_ENTER("mysql_create_user");
- /*
- This statement will be replicated as a statement, even when using
- row-based replication. The flag will be reset at the end of the
- statement.
- */
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
-
/* CREATE USER may be skipped on replication client. */
if ((result= open_grant_tables(thd, tables)))
- {
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(result != 1);
- }
mysql_rwlock_wrlock(&LOCK_grant);
mysql_mutex_lock(&acl_cache->lock);
@@ -6584,10 +6501,6 @@ bool mysql_create_user(THD *thd, List <LEX_USER> &list)
result |= write_bin_log(thd, FALSE, thd->query(), thd->query_length());
mysql_rwlock_unlock(&LOCK_grant);
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(result);
}
@@ -6614,26 +6527,11 @@ bool mysql_drop_user(THD *thd, List <LEX_USER> &list)
TABLE_LIST tables[GRANT_TABLES];
bool some_users_deleted= FALSE;
ulonglong old_sql_mode= thd->variables.sql_mode;
- bool save_binlog_row_based;
DBUG_ENTER("mysql_drop_user");
- /*
- This statement will be replicated as a statement, even when using
- row-based replication. The flag will be reset at the end of the
- statement.
- */
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
-
/* DROP USER may be skipped on replication client. */
if ((result= open_grant_tables(thd, tables)))
- {
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(result != 1);
- }
thd->variables.sql_mode&= ~MODE_PAD_CHAR_TO_FULL_LENGTH;
@@ -6669,10 +6567,6 @@ bool mysql_drop_user(THD *thd, List <LEX_USER> &list)
mysql_rwlock_unlock(&LOCK_grant);
thd->variables.sql_mode= old_sql_mode;
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(result);
}
@@ -6699,26 +6593,13 @@ bool mysql_rename_user(THD *thd, List <LEX_USER> &list)
List_iterator <LEX_USER> user_list(list);
TABLE_LIST tables[GRANT_TABLES];
bool some_users_renamed= FALSE;
- bool save_binlog_row_based;
DBUG_ENTER("mysql_rename_user");
- /*
- This statement will be replicated as a statement, even when using
- row-based replication. The flag will be reset at the end of the
- statement.
- */
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
-
/* RENAME USER may be skipped on replication client. */
if ((result= open_grant_tables(thd, tables)))
- {
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(result != 1);
- }
+
+ DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
mysql_rwlock_wrlock(&LOCK_grant);
mysql_mutex_lock(&acl_cache->lock);
@@ -6764,10 +6645,6 @@ bool mysql_rename_user(THD *thd, List <LEX_USER> &list)
result |= write_bin_log(thd, FALSE, thd->query(), thd->query_length());
mysql_rwlock_unlock(&LOCK_grant);
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(result);
}
@@ -6792,25 +6669,12 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
int result;
ACL_DB *acl_db;
TABLE_LIST tables[GRANT_TABLES];
- bool save_binlog_row_based;
DBUG_ENTER("mysql_revoke_all");
- /*
- This statement will be replicated as a statement, even when using
- row-based replication. The flag will be reset at the end of the
- statement.
- */
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
-
if ((result= open_grant_tables(thd, tables)))
- {
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(result != 1);
- }
+
+ DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
mysql_rwlock_wrlock(&LOCK_grant);
mysql_mutex_lock(&acl_cache->lock);
@@ -6961,10 +6825,6 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
write_bin_log(thd, FALSE, thd->query(), thd->query_length());
mysql_rwlock_unlock(&LOCK_grant);
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(result);
}
@@ -7056,26 +6916,19 @@ bool sp_revoke_privileges(THD *thd, const char *sp_db, const char *sp_name,
TABLE_LIST tables[GRANT_TABLES];
HASH *hash= is_proc ? &proc_priv_hash : &func_priv_hash;
Silence_routine_definer_errors error_handler;
- bool save_binlog_row_based;
DBUG_ENTER("sp_revoke_privileges");
if ((result= open_grant_tables(thd, tables)))
DBUG_RETURN(result != 1);
+ DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
+
/* Be sure to pop this before exiting this scope! */
thd->push_internal_handler(&error_handler);
mysql_rwlock_wrlock(&LOCK_grant);
mysql_mutex_lock(&acl_cache->lock);
- /*
- This statement will be replicated as a statement, even when using
- row-based replication. The flag will be reset at the end of the
- statement.
- */
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
-
/* Remove procedure access */
do
{
@@ -7109,10 +6962,6 @@ bool sp_revoke_privileges(THD *thd, const char *sp_db, const char *sp_name,
mysql_rwlock_unlock(&LOCK_grant);
thd->pop_internal_handler();
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(error_handler.has_errors());
}
@@ -7231,17 +7080,6 @@ bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name,
}
-/*****************************************************************************
- Instantiate used templates
-*****************************************************************************/
-
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-template class List_iterator<LEX_COLUMN>;
-template class List_iterator<LEX_USER>;
-template class List<LEX_COLUMN>;
-template class List<LEX_USER>;
-#endif
-
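The boilerplate removed above (save the row-format flag, clear it, restore it on every exit path) is not replaced one-for-one: the grant commands now simply assert that statement format is already in effect. For call sites that still need a temporary switch, sql_class.h later in this patch adds a save/restore pair. A minimal sketch of that pattern; do_grant_work() is a hypothetical stand-in for the command body:

  /* Sketch only: temporary statement-format section using the new
     THD helpers added later in this patch. */
  enum_binlog_format save_format= thd->set_current_stmt_binlog_format_stmt();
  bool res= do_grant_work(thd);                /* hypothetical body */
  thd->restore_stmt_binlog_format(save_format);
  return res;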
/**
Validate if a user can proxy as another user
diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc
index c65e56edbe0..e6bbef482a7 100644
--- a/sql/sql_admin.cc
+++ b/sql/sql_admin.cc
@@ -27,7 +27,9 @@
#include "sql_acl.h" // *_ACL
#include "sp.h" // Sroutine_hash_entry
#include "sql_parse.h" // check_table_access
+#include "strfunc.h"
#include "sql_admin.h"
+#include "sql_statistics.h"
/* Prepare, run and cleanup for mysql_recreate_table() */
@@ -320,7 +322,9 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
Protocol *protocol= thd->protocol;
LEX *lex= thd->lex;
int result_code;
+ int compl_result_code;
bool need_repair_or_alter= 0;
+
DBUG_ENTER("mysql_admin_table");
DBUG_PRINT("enter", ("extra_open_options: %u", extra_open_options));
@@ -640,9 +644,92 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
}
}
- DBUG_PRINT("admin", ("calling operator_func '%s'", operator_name));
- result_code = (table->table->file->*operator_func)(thd, check_opt);
- DBUG_PRINT("admin", ("operator_func returned: %d", result_code));
+ result_code= compl_result_code= HA_ADMIN_OK;
+
+ if (operator_func == &handler::ha_analyze)
+ {
+ TABLE *tab= table->table;
+ Field **field_ptr= tab->field;
+
+ if (lex->with_persistent_for_clause &&
+ tab->s->table_category != TABLE_CATEGORY_USER)
+ {
+ compl_result_code= result_code= HA_ADMIN_INVALID;
+ }
+
+ if (!lex->column_list)
+ {
+ uint fields= 0;
+ for ( ; *field_ptr; field_ptr++, fields++) ;
+ bitmap_set_prefix(tab->read_set, fields);
+ }
+ else
+ {
+ int pos;
+ LEX_STRING *column_name;
+ List_iterator_fast<LEX_STRING> it(*lex->column_list);
+
+ bitmap_clear_all(tab->read_set);
+ while ((column_name= it++))
+ {
+ if (tab->s->fieldnames.type_names == 0 ||
+ (pos= find_type(&tab->s->fieldnames, column_name->str,
+ column_name->length, 1)) <= 0)
+ {
+ compl_result_code= result_code= HA_ADMIN_INVALID;
+ break;
+ }
+ bitmap_set_bit(tab->read_set, pos-1);
+ }
+ tab->file->column_bitmaps_signal();
+ }
+
+ if (!lex->index_list)
+ {
+ tab->keys_in_use_for_query.init(tab->s->keys);
+ }
+ else
+ {
+ int pos;
+ LEX_STRING *index_name;
+ List_iterator_fast<LEX_STRING> it(*lex->index_list);
+
+ tab->keys_in_use_for_query.clear_all();
+ while ((index_name= it++))
+ {
+ if (tab->s->keynames.type_names == 0 ||
+ (pos= find_type(&tab->s->keynames, index_name->str,
+ index_name->length, 1)) <= 0)
+ {
+ compl_result_code= result_code= HA_ADMIN_INVALID;
+ break;
+ }
+ tab->keys_in_use_for_query.set_bit(--pos);
+ }
+ }
+ }
+
+ if (result_code == HA_ADMIN_OK)
+ {
+ DBUG_PRINT("admin", ("calling operator_func '%s'", operator_name));
+ result_code = (table->table->file->*operator_func)(thd, check_opt);
+ DBUG_PRINT("admin", ("operator_func returned: %d", result_code));
+ }
+
+ if (compl_result_code == HA_ADMIN_OK &&
+ operator_func == &handler::ha_analyze &&
+ table->table->s->table_category == TABLE_CATEGORY_USER &&
+ (get_use_stat_tables_mode(thd) > NEVER ||
+ lex->with_persistent_for_clause))
+ {
+ if (!(compl_result_code=
+ alloc_statistics_for_table(thd, table->table)) &&
+ !(compl_result_code=
+ collect_statistics_for_table(thd, table->table)))
+ compl_result_code= update_statistics_for_table(thd, table->table);
+ if (compl_result_code)
+ result_code= HA_ADMIN_FAILED;
+ }
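The new ANALYZE path for user tables is a three-stage pipeline: allocate per-table statistics structures, collect them by scanning, then persist them to the statistics tables, with any failure downgrading the command result. A condensed sketch of that flow, using the sql_statistics.h helpers called above (each assumed to return 0 on success, as the code above implies):

  /* Sketch: collect-then-persist pipeline run by the ha_analyze branch. */
  static int analyze_collect_persistent_stats(THD *thd, TABLE *tab)
  {
    int rc= alloc_statistics_for_table(thd, tab);   /* allocate stat structs */
    if (!rc)
      rc= collect_statistics_for_table(thd, tab);   /* scan, gather data */
    if (!rc)
      rc= update_statistics_for_table(thd, tab);    /* write to stat tables */
    return rc;                                      /* non-zero: HA_ADMIN_FAILED */
  }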
if (result_code == HA_ADMIN_NOT_IMPLEMENTED && need_repair_or_alter)
{
diff --git a/sql/sql_analyse.h b/sql/sql_analyse.h
index 34c8e2da3d4..8bac29de5a3 100644
--- a/sql/sql_analyse.h
+++ b/sql/sql_analyse.h
@@ -121,7 +121,8 @@ public:
must_be_blob(0), was_zero_fill(0),
was_maybe_zerofill(0), can_be_still_num(1)
{ init_tree(&tree, 0, 0, sizeof(String), (qsort_cmp2) sortcmp2,
- 0, (tree_element_free) free_string, NULL); };
+ (tree_element_free) free_string, NULL,
+ MYF(MY_THREAD_SPECIFIC)); };
void add();
void get_opt_type(String*, ha_rows);
@@ -162,7 +163,7 @@ public:
{
bin_size= my_decimal_get_binary_size(a->max_length, a->decimals);
init_tree(&tree, 0, 0, bin_size, (qsort_cmp2)compare_decimal2,
- 0, 0, (void *)&bin_size);
+ 0, (void *)&bin_size, MYF(MY_THREAD_SPECIFIC));
};
void add();
@@ -190,7 +191,8 @@ public:
field_real(Item* a, analyse* b) :field_info(a,b),
min_arg(0), max_arg(0), sum(0), sum_sqr(0), max_notzero_dec_len(0)
{ init_tree(&tree, 0, 0, sizeof(double),
- (qsort_cmp2) compare_double2, 0, NULL, NULL); }
+ (qsort_cmp2) compare_double2, NULL, NULL,
+ MYF(MY_THREAD_SPECIFIC)); }
void add();
void get_opt_type(String*, ha_rows);
@@ -244,7 +246,8 @@ public:
field_longlong(Item* a, analyse* b) :field_info(a,b),
min_arg(0), max_arg(0), sum(0), sum_sqr(0)
{ init_tree(&tree, 0, 0, sizeof(longlong),
- (qsort_cmp2) compare_longlong2, 0, NULL, NULL); }
+ (qsort_cmp2) compare_longlong2, NULL, NULL,
+ MYF(MY_THREAD_SPECIFIC)); }
void add();
void get_opt_type(String*, ha_rows);
@@ -289,7 +292,8 @@ public:
field_ulonglong(Item* a, analyse * b) :field_info(a,b),
min_arg(0), max_arg(0), sum(0),sum_sqr(0)
{ init_tree(&tree, 0, 0, sizeof(ulonglong),
- (qsort_cmp2) compare_ulonglong2, 0, NULL, NULL); }
+ (qsort_cmp2) compare_ulonglong2, NULL, NULL,
+ MYF(MY_THREAD_SPECIFIC)); }
void add();
void get_opt_type(String*, ha_rows);
String *get_min_arg(String *s) { s->set(min_arg,my_thd_charset); return s; }
diff --git a/sql/sql_array.h b/sql/sql_array.h
index 67f1f1c2ad7..f07126bc0ef 100644
--- a/sql/sql_array.h
+++ b/sql/sql_array.h
@@ -19,6 +19,77 @@
#include <my_sys.h>
+/**
+ A wrapper class which provides array bounds checking.
+ We do *not* own the array, we simply have a pointer to the first element,
+ and a length.
+
+ @remark
+ We want the compiler-generated versions of:
+ - the copy CTOR (memberwise initialization)
+ - the assignment operator (memberwise assignment)
+
+ @param Element_type The type of the elements of the container.
+ */
+template <typename Element_type> class Bounds_checked_array
+{
+public:
+ Bounds_checked_array() : m_array(NULL), m_size(0) {}
+
+ Bounds_checked_array(Element_type *el, size_t size)
+ : m_array(el), m_size(size)
+ {}
+
+ void reset() { m_array= NULL; m_size= 0; }
+
+ void reset(Element_type *array, size_t size)
+ {
+ m_array= array;
+ m_size= size;
+ }
+
+ /**
+ Set a new bound on the array. Does not resize the underlying
+ array, so the new size must be smaller than or equal to the
+ current size.
+ */
+ void resize(size_t new_size)
+ {
+ DBUG_ASSERT(new_size <= m_size);
+ m_size= new_size;
+ }
+
+ Element_type &operator[](size_t n)
+ {
+ DBUG_ASSERT(n < m_size);
+ return m_array[n];
+ }
+
+ const Element_type &operator[](size_t n) const
+ {
+ DBUG_ASSERT(n < m_size);
+ return m_array[n];
+ }
+
+ size_t element_size() const { return sizeof(Element_type); }
+ size_t size() const { return m_size; }
+
+ bool is_null() const { return m_array == NULL; }
+
+ void pop_front()
+ {
+ DBUG_ASSERT(m_size > 0);
+ m_array+= 1;
+ m_size-= 1;
+ }
+
+ Element_type *array() const { return m_array; }
+
+private:
+ Element_type *m_array;
+ size_t m_size;
+};
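A short usage sketch for the new wrapper (buffer and names are illustrative); in debug builds an out-of-range index trips the DBUG_ASSERT instead of silently reading past the array:

  /* Sketch: wrap an existing buffer; the wrapper takes no ownership. */
  Item *buf[8];
  Bounds_checked_array<Item*> ref_array(buf, 8);
  ref_array[0]= NULL;            /* bounds-checked element access */
  ref_array.resize(4);           /* shrink the visible window (never grows) */
  size_t n= ref_array.size();    /* now 4 */
  /* ref_array[5] would assert in debug builds */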
+
/*
A typesafe wrapper around DYNAMIC_ARRAY
*/
@@ -29,7 +100,8 @@ template <class Elem> class Dynamic_array
public:
Dynamic_array(uint prealloc=16, uint increment=16)
{
- my_init_dynamic_array(&array, sizeof(Elem), prealloc, increment);
+ my_init_dynamic_array(&array, sizeof(Elem), prealloc, increment,
+ MYF(MY_THREAD_SPECIFIC));
}
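A recurring change in this patch: allocator calls gain a my_flags argument. Passing MY_THREAD_SPECIFIC appears to mark the memory as owned by the current thread so it is counted in the new THD::status_var.memory_used (see the accounting and the destructor assert in sql_class.cc below), while MYF(0) keeps the allocation in the global count. A sketch of the apparent convention:

  DYNAMIC_ARRAY per_thd_items, shared_items;
  /* charged to the current THD's memory_used: */
  my_init_dynamic_array(&per_thd_items, sizeof(int), 16, 16,
                        MYF(MY_THREAD_SPECIFIC));
  /* counted globally, e.g. for data that may outlive the thread: */
  my_init_dynamic_array(&shared_items, sizeof(int), 16, 16, MYF(0));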
Elem& at(int idx)
diff --git a/sql/sql_audit.cc b/sql/sql_audit.cc
index 523f07592bc..793eead9869 100644
--- a/sql/sql_audit.cc
+++ b/sql/sql_audit.cc
@@ -153,7 +153,7 @@ static my_bool acquire_plugins(THD *thd, plugin_ref plugin, void *arg)
{
/* specify some reasonable initialization defaults */
my_init_dynamic_array(&thd->audit_class_plugins,
- sizeof(plugin_ref), 16, 16);
+ sizeof(plugin_ref), 16, 16, MYF(0));
}
/* lock the plugin and add it to the list */
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index c76f2d43279..b3321bd2771 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -49,6 +49,7 @@
#include "sql_trigger.h"
#include "transaction.h"
#include "sql_prepare.h"
+#include "sql_statistics.h"
#include <m_ctype.h>
#include <my_dir.h>
#include <hash.h>
@@ -396,6 +397,7 @@ bool table_def_init(void)
void table_def_start_shutdown(void)
{
+ DBUG_ENTER("table_def_start_shutdown");
if (table_def_inited)
{
mysql_mutex_lock(&LOCK_open);
@@ -410,6 +412,7 @@ void table_def_start_shutdown(void)
/* Free all cached but unused TABLEs and TABLE_SHAREs. */
close_cached_tables(NULL, NULL, FALSE, LONG_TIMEOUT);
}
+ DBUG_VOID_RETURN;
}
@@ -1086,6 +1089,9 @@ bool close_cached_tables(THD *thd, TABLE_LIST *tables,
mysql_mutex_unlock(&LOCK_open);
+ DBUG_PRINT("info", ("open table definitions: %d",
+ (int) table_def_cache.records));
+
if (!wait_for_refresh)
DBUG_RETURN(result);
@@ -4633,6 +4639,32 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
goto end;
}
+ if (get_use_stat_tables_mode(thd) > NEVER && tables->table)
+ {
+ TABLE_SHARE *table_share= tables->table->s;
+ if (table_share && table_share->table_category == TABLE_CATEGORY_USER &&
+ table_share->tmp_table == NO_TMP_TABLE)
+ {
+ if (table_share->stats_cb.stats_can_be_read ||
+ !alloc_statistics_for_table_share(thd, table_share, FALSE))
+ {
+ if (table_share->stats_cb.stats_can_be_read)
+ {
+ KEY *key_info= table_share->key_info;
+ KEY *key_info_end= key_info + table_share->keys;
+ KEY *table_key_info= tables->table->key_info;
+ for ( ; key_info < key_info_end; key_info++, table_key_info++)
+ table_key_info->read_stats= key_info->read_stats;
+ Field **field_ptr= table_share->field;
+ Field **table_field_ptr= tables->table->field;
+ for ( ; *field_ptr; field_ptr++, table_field_ptr++)
+ (*table_field_ptr)->read_stats= (*field_ptr)->read_stats;
+ tables->table->stats_is_read= table_share->stats_cb.stats_is_read;
+ }
+ }
+ }
+ }
+
process_view_routines:
/*
Again we may need cache all routines used by this view and add
@@ -4937,11 +4969,11 @@ bool open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags,
}
/*
- Initialize temporary MEM_ROOT for new .FRM parsing. Do not allocate
+ Initialize temporary MEM_ROOT for new .FRM parsing. Do not alloctaate
anything yet, to avoid penalty for statements which don't use views
and thus new .FRM format.
*/
- init_sql_alloc(&new_frm_mem, 8024, 0);
+ init_sql_alloc(&new_frm_mem, 8024, 0, MYF(0));
thd->current_tablenr= 0;
restart:
@@ -5647,6 +5679,8 @@ bool open_and_lock_tables(THD *thd, TABLE_LIST *tables,
if (lock_tables(thd, tables, counter, flags))
goto err;
+ (void) read_statistics_for_tables_if_needed(thd, tables);
+
if (derived)
{
if (mysql_handle_derived(thd->lex, DT_INIT))
@@ -8862,34 +8896,33 @@ err_no_arena:
******************************************************************************/
-/*
- Fill fields with given items.
+/**
+ Fill the fields of a table with the values of an Item list
- SYNOPSIS
- fill_record()
- thd thread handler
- fields Item_fields list to be filled
- values values to fill with
- ignore_errors TRUE if we should ignore errors
+ @param thd thread handler
+ @param table_arg the table that is being modified
+ @param fields Item_fields list to be filled
+ @param values values to fill with
+ @param ignore_errors TRUE if we should ignore errors
- NOTE
+ @details
fill_record() may set table->auto_increment_field_not_null and a
caller should make sure that it is reset after their last call to this
function.
- RETURN
- FALSE OK
- TRUE error occured
+ @return Status
+ @retval true An error occurred.
+ @retval false OK.
*/
static bool
-fill_record(THD * thd, List<Item> &fields, List<Item> &values,
+fill_record(THD * thd, TABLE *table_arg, List<Item> &fields, List<Item> &values,
bool ignore_errors)
{
List_iterator_fast<Item> f(fields),v(values);
Item *value, *fld;
Item_field *field;
- TABLE *table= 0, *vcol_table= 0;
+ TABLE *vcol_table= 0;
bool save_abort_on_warning= thd->abort_on_warning;
bool save_no_errors= thd->no_errors;
DBUG_ENTER("fill_record");
@@ -8911,12 +8944,13 @@ fill_record(THD * thd, List<Item> &fields, List<Item> &values,
my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), fld->name);
goto err;
}
- table= field->field->table;
- table->auto_increment_field_not_null= FALSE;
+ DBUG_ASSERT(field->field->table == table_arg);
+ table_arg->auto_increment_field_not_null= FALSE;
f.rewind();
}
else if (thd->lex->unit.insert_table_with_stored_vcol)
vcol_table= thd->lex->unit.insert_table_with_stored_vcol;
+
while ((fld= f++))
{
if (!(field= fld->filed_for_view_update()))
@@ -8926,7 +8960,7 @@ fill_record(THD * thd, List<Item> &fields, List<Item> &values,
}
value=v++;
Field *rfield= field->field;
- table= rfield->table;
+ TABLE* table= rfield->table;
if (rfield == table->next_number_field)
table->auto_increment_field_not_null= TRUE;
if (rfield->vcol_info &&
@@ -8945,6 +8979,7 @@ fill_record(THD * thd, List<Item> &fields, List<Item> &values,
my_message(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR), MYF(0));
goto err;
}
+ rfield->set_explicit_default(value);
DBUG_ASSERT(vcol_table == 0 || vcol_table == table);
vcol_table= table;
}
@@ -8961,8 +8996,8 @@ fill_record(THD * thd, List<Item> &fields, List<Item> &values,
err:
thd->abort_on_warning= save_abort_on_warning;
thd->no_errors= save_no_errors;
- if (table)
- table->auto_increment_field_not_null= FALSE;
+ if (fields.elements)
+ table_arg->auto_increment_field_not_null= FALSE;
DBUG_RETURN(TRUE);
}
@@ -8971,42 +9006,39 @@ err:
Fill fields in list with values from the list of items and invoke
before triggers.
- SYNOPSIS
- fill_record_n_invoke_before_triggers()
- thd thread context
- fields Item_fields list to be filled
- values values to fill with
- ignore_errors TRUE if we should ignore errors
- triggers object holding list of triggers to be invoked
- event event type for triggers to be invoked
+ @param thd thread context
+ @param table the table that is being modified
+ @param fields Item_fields list to be filled
+ @param values values to fill with
+ @param ignore_errors TRUE if we should ignore errors
+ @param event event type for triggers to be invoked
- NOTE
+ @details
This function assumes that fields which values will be set and triggers
to be invoked belong to the same table, and that TABLE::record[0] and
record[1] buffers correspond to new and old versions of row respectively.
- RETURN
- FALSE OK
- TRUE error occured
+ @return Status
+ @retval true An error occurred.
+ @retval false OK.
*/
bool
-fill_record_n_invoke_before_triggers(THD *thd, List<Item> &fields,
+fill_record_n_invoke_before_triggers(THD *thd, TABLE *table, List<Item> &fields,
List<Item> &values, bool ignore_errors,
- Table_triggers_list *triggers,
enum trg_event_type event)
{
bool result;
- result= (fill_record(thd, fields, values, ignore_errors) ||
+ Table_triggers_list *triggers= table->triggers;
+ result= (fill_record(thd, table, fields, values, ignore_errors) ||
(triggers && triggers->process_triggers(thd, event,
TRG_ACTION_BEFORE, TRUE)));
/*
Re-calculate virtual fields to cater for cases when base columns are
updated by the triggers.
*/
- if (!result && triggers)
+ if (!result && triggers && table)
{
- TABLE *table= 0;
List_iterator_fast<Item> f(fields);
Item *fld;
Item_field *item_field;
@@ -9014,47 +9046,46 @@ fill_record_n_invoke_before_triggers(THD *thd, List<Item> &fields,
{
fld= (Item_field*)f++;
item_field= fld->filed_for_view_update();
- if (item_field && item_field->field &&
- (table= item_field->field->table) &&
- table->vfield)
+ if (item_field && item_field->field && table && table->vfield)
+ {
+ DBUG_ASSERT(table == item_field->field->table);
result= update_virtual_fields(thd, table,
table->triggers ? VCOL_UPDATE_ALL :
VCOL_UPDATE_FOR_WRITE);
+ }
}
}
return result;
}
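Call sites migrate accordingly: the table is now an explicit argument and the triggers parameter is gone, since the function reads table->triggers itself. A sketch of the change at a typical call site (the new form matches the sql_insert.cc hunks later in this patch):

  /* old signature (removed):
  fill_record_n_invoke_before_triggers(thd, fields, *values, 0,
                                       table->triggers, TRG_EVENT_INSERT);
  */
  /* new signature: */
  fill_record_n_invoke_before_triggers(thd, table, fields, *values, 0,
                                       TRG_EVENT_INSERT);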
-/*
- Fill field buffer with values from Field list
+/**
+ Fill the field buffer of a table with the values of an Item list
- SYNOPSIS
- fill_record()
- thd thread handler
- ptr pointer on pointer to record
- values list of fields
- ignore_errors TRUE if we should ignore errors
- use_value forces usage of value of the items instead of result
+ @param thd thread handler
+ @param table_arg the table that is being modified
+ @param ptr pointer on pointer to record of fields
+ @param values values to fill with
+ @param ignore_errors TRUE if we should ignore errors
+ @param use_value forces usage of value of the items instead of result
- NOTE
+ @details
fill_record() may set table->auto_increment_field_not_null and a
caller should make sure that it is reset after their last call to this
function.
- RETURN
- FALSE OK
- TRUE error occured
+ @return Status
+ @retval true An error occurred.
+ @retval false OK.
*/
bool
-fill_record(THD *thd, Field **ptr, List<Item> &values, bool ignore_errors,
- bool use_value)
+fill_record(THD *thd, TABLE *table, Field **ptr, List<Item> &values,
+ bool ignore_errors, bool use_value)
{
List_iterator_fast<Item> v(values);
List<TABLE> tbl_list;
Item *value;
- TABLE *table= 0;
Field *field;
bool abort_on_warning_saved= thd->abort_on_warning;
DBUG_ENTER("fill_record");
@@ -9069,7 +9100,7 @@ fill_record(THD *thd, Field **ptr, List<Item> &values, bool ignore_errors,
On INSERT or UPDATE fields are checked to be from the same table,
thus we safely can take table from the first field.
*/
- table= (*ptr)->table;
+ DBUG_ASSERT((*ptr)->table == table);
/*
Reset the table->auto_increment_field_not_null as it is valid for
@@ -9100,6 +9131,7 @@ fill_record(THD *thd, Field **ptr, List<Item> &values, bool ignore_errors,
else
if (value->save_in_field(field, 0) < 0)
goto err;
+ field->set_explicit_default(value);
}
/* Update virtual fields*/
thd->abort_on_warning= FALSE;
@@ -9119,36 +9151,34 @@ err:
/*
- Fill fields in array with values from the list of items and invoke
+ Fill fields in an array with values from the list of items and invoke
before triggers.
- SYNOPSIS
- fill_record_n_invoke_before_triggers()
- thd thread context
- ptr NULL-ended array of fields to be filled
- values values to fill with
- ignore_errors TRUE if we should ignore errors
- triggers object holding list of triggers to be invoked
- event event type for triggers to be invoked
+ @param thd thread context
+ @param table the table that is being modified
+ @param ptr the fields to be filled
+ @param values values to fill with
+ @param ignore_errors TRUE if we should ignore errors
+ @param event event type for triggers to be invoked
- NOTE
+ @details
This function assumes that fields which values will be set and triggers
to be invoked belong to the same table, and that TABLE::record[0] and
record[1] buffers correspond to new and old versions of row respectively.
- RETURN
- FALSE OK
- TRUE error occured
+ @return Status
+ @retval true An error occurred.
+ @retval false OK.
*/
bool
-fill_record_n_invoke_before_triggers(THD *thd, Field **ptr,
+fill_record_n_invoke_before_triggers(THD *thd, TABLE *table, Field **ptr,
List<Item> &values, bool ignore_errors,
- Table_triggers_list *triggers,
enum trg_event_type event)
{
bool result;
- result= (fill_record(thd, ptr, values, ignore_errors, FALSE) ||
+ Table_triggers_list *triggers= table->triggers;
+ result= (fill_record(thd, table, ptr, values, ignore_errors, FALSE) ||
(triggers && triggers->process_triggers(thd, event,
TRG_ACTION_BEFORE, TRUE)));
/*
@@ -9157,7 +9187,7 @@ fill_record_n_invoke_before_triggers(THD *thd, Field **ptr,
*/
if (!result && triggers && *ptr)
{
- TABLE *table= (*ptr)->table;
+ DBUG_ASSERT(table == (*ptr)->table);
if (table->vfield)
result= update_virtual_fields(thd, table,
table->triggers ? VCOL_UPDATE_ALL :
@@ -9236,7 +9266,7 @@ my_bool mysql_rm_tmp_tables(void)
my_dirend(dirp);
}
delete thd;
- my_pthread_setspecific_ptr(THR_THD, 0);
+ set_current_thd(0);
DBUG_RETURN(0);
}
@@ -9650,6 +9680,12 @@ has_write_table_auto_increment_not_first_in_pk(TABLE_LIST *tables)
must call close_system_tables() to close systems tables opened
with this call.
+ NOTES
+ In some situations we use this function to open system tables for
+ writing. This happens, for example, with the statistics tables when
+ they are updated by an ANALYZE command. In these cases we must
+ guarantee that the system tables cannot be deadlocked.
+
RETURN
FALSE Success
TRUE Error
@@ -9803,11 +9839,6 @@ open_log_table(THD *thd, TABLE_LIST *one_table, Open_tables_backup *backup)
/* Make sure all columns get assigned to a default value */
table->use_all_columns();
table->no_replicate= 1;
- /*
- Don't set automatic timestamps as we may want to use time of logging,
- not from query start
- */
- table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
}
else
thd->restore_backup_open_tables_state(backup);
@@ -9863,6 +9894,7 @@ int dynamic_column_error_message(enum_dyncol_func_result rc)
switch (rc) {
case ER_DYNCOL_YES:
case ER_DYNCOL_OK:
+ case ER_DYNCOL_TRUNCATED:
break; // it is not an error
case ER_DYNCOL_FORMAT:
my_error(ER_DYN_COL_WRONG_FORMAT, MYF(0));
diff --git a/sql/sql_base.h b/sql/sql_base.h
index 45d41777fea..78ab8c7df24 100644
--- a/sql/sql_base.h
+++ b/sql/sql_base.h
@@ -170,15 +170,15 @@ TABLE *find_temporary_table(THD *thd, const TABLE_LIST *tl);
TABLE *find_temporary_table(THD *thd, const char *table_key,
uint table_key_length);
void close_thread_tables(THD *thd);
-bool fill_record_n_invoke_before_triggers(THD *thd, List<Item> &fields,
+bool fill_record_n_invoke_before_triggers(THD *thd, TABLE *table,
+ List<Item> &fields,
List<Item> &values,
bool ignore_errors,
- Table_triggers_list *triggers,
enum trg_event_type event);
-bool fill_record_n_invoke_before_triggers(THD *thd, Field **field,
+bool fill_record_n_invoke_before_triggers(THD *thd, TABLE *table,
+ Field **field,
List<Item> &values,
bool ignore_errors,
- Table_triggers_list *triggers,
enum trg_event_type event);
bool insert_fields(THD *thd, Name_resolution_context *context,
const char *db_name, const char *table_name,
@@ -191,7 +191,7 @@ bool setup_fields(THD *thd, Item** ref_pointer_array,
List<Item> &item, enum_mark_columns mark_used_columns,
List<Item> *sum_func_list, bool allow_sum_func);
void unfix_fields(List<Item> &items);
-bool fill_record(THD *thd, Field **field, List<Item> &values,
+bool fill_record(THD *thd, TABLE *table, Field **field, List<Item> &values,
bool ignore_errors, bool use_value);
Field *
@@ -272,6 +272,7 @@ bool rename_temporary_table(THD* thd, TABLE *table, const char *new_db,
const char *table_name);
bool is_equal(const LEX_STRING *a, const LEX_STRING *b);
+class Open_tables_backup;
/* Functions to work with system tables. */
bool open_system_tables_for_read(THD *thd, TABLE_LIST *table_list,
Open_tables_backup *backup);
@@ -308,6 +309,14 @@ int update_virtual_fields(THD *thd, TABLE *table,
enum enum_vcol_update_mode vcol_update_mode= VCOL_UPDATE_FOR_READ);
int dynamic_column_error_message(enum_dyncol_func_result rc);
+/* open_and_lock_tables with optional derived handling */
+int open_and_lock_tables_derived(THD *thd, TABLE_LIST *tables, bool derived);
+
+extern "C" int simple_raw_key_cmp(void* arg, const void* key1,
+ const void* key2);
+extern "C" int count_distinct_walk(void *elem, element_count count, void *arg);
+int simple_str_key_cmp(void* arg, uchar* key1, uchar* key2);
+
extern TABLE *unused_tables;
extern Item **not_found_item;
extern Field *not_found_field;
@@ -474,7 +483,6 @@ open_tables(THD *thd, TABLE_LIST **tables, uint *counter, uint flags)
return open_tables(thd, tables, counter, flags, &prelocking_strategy);
}
-
inline TABLE *open_n_lock_single_table(THD *thd, TABLE_LIST *table_l,
thr_lock_type lock_type, uint flags)
{
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 191cc4e01c8..ddc75254c9e 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -1035,8 +1035,8 @@ void query_cache_insert(const char *packet, ulong length,
/*
Current_thd can be NULL when a new connection is immediately ended
due to "Too many connections". thd->store_globals() has not been
- called at this time and hence my_pthread_setspecific_ptr(THR_THD,
- this) has not been called for this thread.
+ called at this time and hence set_current_thd(this) has not been
+ called for this thread.
*/
if (!thd)
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index df71b78ada0..7577056c3c5 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -73,23 +73,6 @@ char empty_c_string[1]= {0}; /* used for not defined db */
const char * const THD::DEFAULT_WHERE= "field list";
-
-/*****************************************************************************
-** Instansiate templates
-*****************************************************************************/
-
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-/* Used templates */
-template class List<Key>;
-template class List_iterator<Key>;
-template class List<Key_part_spec>;
-template class List_iterator<Key_part_spec>;
-template class List<Alter_drop>;
-template class List_iterator<Alter_drop>;
-template class List<Alter_column>;
-template class List_iterator<Alter_column>;
-#endif
-
/****************************************************************************
** User variables
****************************************************************************/
@@ -811,6 +794,7 @@ THD::THD()
accessed_rows_and_keys(0),
warning_info(&main_warning_info),
stmt_da(&main_da),
+ thread_id(0),
global_disable_checkpoint(0),
failed_com_change_user(0),
is_fatal_error(0),
@@ -826,17 +810,28 @@ THD::THD()
#if defined(ENABLED_DEBUG_SYNC)
debug_sync_control(0),
#endif /* defined(ENABLED_DEBUG_SYNC) */
- main_warning_info(0, false)
+ main_warning_info(0, false, false)
{
ulong tmp;
mdl_context.init(this);
/*
+ We set THR_THD to temporarily point to this THD so that all the
+ memory allocated for this THD is registered against it
+ */
+ THD *old_THR_THD= current_thd;
+ set_current_thd(this);
+ status_var.memory_used= 0;
+
+ main_warning_info.init();
+ /*
Pass nominal parameters to init_alloc_root only to ensure that
the destructor works OK in case of an error. The main_mem_root
will be re-initialized in init_for_queries().
*/
- init_sql_alloc(&main_mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0);
+ init_sql_alloc(&main_mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0,
+ MYF(MY_THREAD_SPECIFIC));
+
stmt_arena= this;
thread_stack= 0;
scheduler= thread_scheduler; // Will be fixed later
@@ -871,8 +866,10 @@ THD::THD()
progress.max_counter= 0;
current_linfo = 0;
slave_thread = 0;
+ connection_name.str= 0;
+ connection_name.length= 0;
+
bzero(&variables, sizeof(variables));
- thread_id= 0;
one_shot_set= 0;
file_id = 0;
query_id= 0;
@@ -890,6 +887,7 @@ THD::THD()
mysql_audit_init_thd(this);
#endif
net.vio=0;
+ net.buff= 0;
client_capabilities= 0; // minimalistic client
ull=0;
system_thread= NON_SYSTEM_THREAD;
@@ -931,7 +929,7 @@ THD::THD()
user_connect=(USER_CONN *)0;
my_hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0,
(my_hash_get_key) get_var_key,
- (my_hash_free_key) free_user_var, 0);
+ (my_hash_free_key) free_user_var, HASH_THREAD_SPECIFIC);
sp_proc_cache= NULL;
sp_func_cache= NULL;
@@ -939,7 +937,7 @@ THD::THD()
/* For user vars replication*/
if (opt_bin_log)
my_init_dynamic_array(&user_var_events,
- sizeof(BINLOG_USER_VAR_EVENT *), 16, 16);
+ sizeof(BINLOG_USER_VAR_EVENT *), 16, 16, MYF(0));
else
bzero((char*) &user_var_events, sizeof(user_var_events));
@@ -962,6 +960,8 @@ THD::THD()
prepare_derived_at_open= FALSE;
create_tmp_table_for_derived= FALSE;
save_prep_leaf_list= FALSE;
+ /* Restore THR_THD */
+ set_current_thd(old_THR_THD);
}
@@ -1230,6 +1230,7 @@ extern "C" THD *_current_thd_noinline(void)
void THD::init(void)
{
+ DBUG_ENTER("thd::init");
mysql_mutex_lock(&LOCK_global_system_variables);
plugin_thdvar_init(this);
/*
@@ -1238,7 +1239,14 @@ void THD::init(void)
avoid temporary tables replication failure.
*/
variables.pseudo_thread_id= thread_id;
+
+ variables.default_master_connection.str= default_master_connection_buff;
+ ::strmake(variables.default_master_connection.str,
+ global_system_variables.default_master_connection.str,
+ variables.default_master_connection.length);
+
mysql_mutex_unlock(&LOCK_global_system_variables);
+
server_status= SERVER_STATUS_AUTOCOMMIT;
if (variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)
server_status|= SERVER_STATUS_NO_BACKSLASH_ESCAPES;
@@ -1252,7 +1260,7 @@ void THD::init(void)
tx_isolation= (enum_tx_isolation) variables.tx_isolation;
update_charset();
reset_current_stmt_binlog_format_row();
- bzero((char *) &status_var, sizeof(status_var));
+ set_status_var_init();
bzero((char *) &org_status_var, sizeof(org_status_var));
if (variables.sql_log_bin)
@@ -1268,6 +1276,8 @@ void THD::init(void)
/* Initialize the Debug Sync Facility. See debug_sync.cc. */
debug_sync_init_thread(this);
#endif /* defined(ENABLED_DEBUG_SYNC) */
+ apc_target.init(&LOCK_thd_data);
+ DBUG_VOID_RETURN;
}
@@ -1433,6 +1443,7 @@ void THD::cleanup(void)
ull= NULL;
}
+ apc_target.destroy();
cleanup_done=1;
DBUG_VOID_RETURN;
}
@@ -1440,8 +1451,16 @@ void THD::cleanup(void)
THD::~THD()
{
+ THD *orig_thd= current_thd;
THD_CHECK_SENTRY(this);
DBUG_ENTER("~THD()");
+
+ /*
+ In error cases, thd may not be current thd. We have to fix this so
+ that memory allocation counting is done correctly
+ */
+ set_current_thd(this);
+
/* Ensure that no one is using THD */
mysql_mutex_lock(&LOCK_thd_data);
mysys_var=0; // Safety (shouldn't be needed)
@@ -1450,10 +1469,8 @@ THD::~THD()
/* Close connection */
#ifndef EMBEDDED_LIBRARY
if (net.vio)
- {
vio_delete(net.vio);
- net_end(&net);
- }
+ net_end(&net);
#endif
stmt_map.reset(); /* close all prepared statements */
if (!cleanup_done)
@@ -1488,6 +1505,15 @@ THD::~THD()
#endif
free_root(&main_mem_root, MYF(0));
+ main_warning_info.free_memory();
+ if (status_var.memory_used != 0)
+ {
+ DBUG_PRINT("error", ("memory_used: %lld", status_var.memory_used));
+ SAFEMALLOC_REPORT_MEMORY(my_thread_dbug_id());
+ DBUG_ASSERT(status_var.memory_used == 0); // Ensure everything is freed
+ }
+
+ set_current_thd(orig_thd);
DBUG_VOID_RETURN;
}
@@ -1763,7 +1789,7 @@ bool THD::store_globals()
*/
DBUG_ASSERT(thread_stack);
- if (my_pthread_setspecific_ptr(THR_THD, this) ||
+ if (set_current_thd(this) ||
my_pthread_setspecific_ptr(THR_MALLOC, &mem_root))
return 1;
/*
@@ -1807,7 +1833,7 @@ void THD::reset_globals()
mysql_mutex_unlock(&LOCK_thd_data);
/* Undocking the thread specific data. */
- my_pthread_setspecific_ptr(THR_THD, NULL);
+ set_current_thd(0);
my_pthread_setspecific_ptr(THR_MALLOC, NULL);
}
@@ -2088,6 +2114,20 @@ CHANGED_TABLE_LIST* THD::changed_table_dup(const char *key, long key_length)
int THD::send_explain_fields(select_result *result)
{
List<Item> field_list;
+ make_explain_field_list(field_list);
+ return (result->send_result_set_metadata(field_list,
+ Protocol::SEND_NUM_ROWS |
+ Protocol::SEND_EOF));
+}
+
+
+/*
+ Populate the provided field_list with EXPLAIN output columns.
+ this->lex->describe has the EXPLAIN flags
+*/
+
+void THD::make_explain_field_list(List<Item> &field_list)
+{
Item *item;
CHARSET_INFO *cs= system_charset_info;
field_list.push_back(item= new Item_return_int("id",3, MYSQL_TYPE_LONGLONG));
@@ -2126,10 +2166,9 @@ int THD::send_explain_fields(select_result *result)
}
item->maybe_null= 1;
field_list.push_back(new Item_empty_string("Extra", 255, cs));
- return (result->send_result_set_metadata(field_list,
- Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF));
}
+
#ifdef SIGNAL_WITH_VIO_CLOSE
void THD::close_active_vio()
{
@@ -2361,6 +2400,7 @@ int select_send::send_data(List<Item> &items)
DBUG_RETURN(0);
}
+
bool select_send::send_eof()
{
/*
@@ -3254,6 +3294,10 @@ void THD::end_statement()
}
+/*
+ Start using the arena specified in *set. The current arena data will be
+ saved to *backup.
+*/
void THD::set_n_backup_active_arena(Query_arena *set, Query_arena *backup)
{
DBUG_ENTER("THD::set_n_backup_active_arena");
@@ -3268,6 +3312,12 @@ void THD::set_n_backup_active_arena(Query_arena *set, Query_arena *backup)
}
+/*
+ Stop using the temporary arena and switch back to the arena saved in
+ *backup.
+ The temporary arena's state is returned in *set.
+*/
+
void THD::restore_active_arena(Query_arena *set, Query_arena *backup)
{
DBUG_ENTER("THD::restore_active_arena");
@@ -3639,7 +3689,8 @@ void thd_increment_net_big_packet_count(ulong length)
void THD::set_status_var_init()
{
- bzero((char*) &status_var, sizeof(status_var));
+ bzero((char*) &status_var, offsetof(STATUS_VAR,
+ last_cleared_system_status_var));
}
@@ -3844,12 +3895,7 @@ void THD::restore_backup_open_tables_state(Open_tables_backup *backup)
#undef thd_killed
extern "C" int thd_killed(const MYSQL_THD thd)
{
- if (!thd)
- thd= current_thd;
-
- if (!(thd->killed & KILL_HARD_BIT))
- return 0;
- return thd->killed != 0;
+ return thd_kill_level(thd) > THD_ABORT_SOFTLY;
}
#else
#error now thd_killed() function can go away
@@ -3861,8 +3907,17 @@ extern "C" int thd_killed(const MYSQL_THD thd)
*/
extern "C" enum thd_kill_levels thd_kill_level(const MYSQL_THD thd)
{
+ THD* current= current_thd;
+
if (!thd)
- thd= current_thd;
+ thd= current;
+
+ if (thd == current)
+ {
+ Apc_target *apc_target= (Apc_target*)&thd->apc_target;
+ if (apc_target->have_apc_requests())
+ apc_target->process_apc_requests();
+ }
if (likely(thd->killed == NOT_KILLED))
return THD_IS_NOT_KILLED;
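The graded API lets engine code distinguish a soft abort (finish the current statement cleanly) from a hard one. A hedged sketch of an engine-side scan loop; fetch_next_row() and soft_abort are hypothetical, HA_ERR_ABORTED_BY_USER is an assumed error code, and THD_ABORT_ASAP is assumed to be the level above THD_ABORT_SOFTLY, as the thd_killed() wrapper above implies:

  /* Sketch: periodic kill check inside a long engine scan. */
  bool soft_abort= false;
  while (!soft_abort && fetch_next_row())
  {
    enum thd_kill_levels lvl= thd_kill_level(NULL);  /* NULL: current THD */
    if (lvl > THD_ABORT_SOFTLY)
      return HA_ERR_ABORTED_BY_USER;   /* hard kill: stop immediately */
    if (lvl != THD_IS_NOT_KILLED)
      soft_abort= true;                /* finish current unit, then stop */
  }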
@@ -4960,27 +5015,6 @@ THD::binlog_prepare_pending_rows_event(TABLE* table, uint32 serv_id,
DBUG_RETURN(pending); /* This is the current pending event */
}
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-/*
- Instantiate the versions we need, we have -fno-implicit-template as
- compiling option.
-*/
-template Rows_log_event*
-THD::binlog_prepare_pending_rows_event(TABLE*, uint32, MY_BITMAP const*,
- size_t, size_t, bool,
- Write_rows_log_event*);
-
-template Rows_log_event*
-THD::binlog_prepare_pending_rows_event(TABLE*, uint32, MY_BITMAP const*,
- size_t colcnt, size_t, bool,
- Delete_rows_log_event *);
-
-template Rows_log_event*
-THD::binlog_prepare_pending_rows_event(TABLE*, uint32, MY_BITMAP const*,
- size_t colcnt, size_t, bool,
- Update_rows_log_event *);
-#endif
-
/* Declare in unnamed namespace. */
CPP_UNNAMED_NS_START
/**
diff --git a/sql/sql_class.h b/sql/sql_class.h
index cff6acbb9c9..87b9783c1ed 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -43,7 +43,7 @@
#include "violite.h" /* vio_is_connected */
#include "thr_lock.h" /* thr_lock_type, THR_LOCK_DATA,
THR_LOCK_INFO */
-
+#include "my_apc.h"
class Reprepare_observer;
class Relay_log_info;
@@ -149,9 +149,6 @@ public:
};
-#define TC_LOG_PAGE_SIZE 8192
-#define TC_LOG_MIN_SIZE (3*TC_LOG_PAGE_SIZE)
-
#define TC_HEURISTIC_RECOVER_COMMIT 1
#define TC_HEURISTIC_RECOVER_ROLLBACK 2
extern ulong tc_heuristic_recover;
@@ -502,6 +499,7 @@ typedef struct system_variables
ulong net_write_timeout;
ulong optimizer_prune_level;
ulong optimizer_search_depth;
+ ulong use_stat_tables;
ulong preload_buff_size;
ulong profiling_history_size;
ulong read_buff_size;
@@ -536,6 +534,12 @@ typedef struct system_variables
thread the query is being run to replicate temp tables properly
*/
my_thread_id pseudo_thread_id;
+ /**
+ Placeholders used by sys_var.cc to store multi-source replication
+ variables while they are being updated or shown.
+ */
+ ulong slave_skip_counter;
+ ulong max_relay_log_size;
my_bool low_priority_updates;
my_bool query_cache_wlock_invalidate;
@@ -560,6 +564,9 @@ typedef struct system_variables
CHARSET_INFO *collation_database;
CHARSET_INFO *collation_connection;
+ /* Names. These will be allocated in buffers in thd */
+ LEX_STRING default_master_connection;
+
/* Error messages */
MY_LOCALE *lc_messages;
/* Locale Support */
@@ -676,6 +683,8 @@ typedef struct system_status_var
ulonglong binlog_bytes_written;
double last_query_cost;
double cpu_time, busy_time;
+ /* Don't initialize */
+ volatile int64 memory_used; /* This shouldn't be accumulated */
} STATUS_VAR;
/*
@@ -685,6 +694,7 @@ typedef struct system_status_var
*/
#define last_system_status_var questions
+#define last_cleared_system_status_var memory_used
void mark_transaction_to_rollback(THD *thd, bool all);
@@ -1249,7 +1259,8 @@ enum enum_thread_type
SYSTEM_THREAD_SLAVE_SQL= 4,
SYSTEM_THREAD_NDBCLUSTER_BINLOG= 8,
SYSTEM_THREAD_EVENT_SCHEDULER= 16,
- SYSTEM_THREAD_EVENT_WORKER= 32
+ SYSTEM_THREAD_EVENT_WORKER= 32,
+ SYSTEM_THREAD_BINLOG_BACKGROUND= 64
};
inline char const *
@@ -1417,7 +1428,8 @@ public:
m_reopen_array(NULL),
m_locked_tables_count(0)
{
- init_sql_alloc(&m_locked_tables_root, MEM_ROOT_BLOCK_SIZE, 0);
+ init_sql_alloc(&m_locked_tables_root, MEM_ROOT_BLOCK_SIZE, 0,
+ MYF(MY_THREAD_SPECIFIC));
}
void unlock_locked_tables(THD *thd);
~Locked_tables_list()
@@ -1523,6 +1535,11 @@ private:
extern "C" void my_message_sql(uint error, const char *str, myf MyFlags);
+class THD;
+#ifndef DBUG_OFF
+void dbug_serve_apcs(THD *thd, int n_calls);
+#endif
+
/**
@class THD
For each client connection we create a separate thread with THD serving as
@@ -1868,7 +1885,8 @@ public:
{
bzero((char*)this, sizeof(*this));
xid_state.xid.null();
- init_sql_alloc(&mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0);
+ init_sql_alloc(&mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0,
+ MYF(MY_THREAD_SPECIFIC));
}
} transaction;
Global_read_lock global_read_lock;
@@ -2192,9 +2210,25 @@ public:
*/
killed_state volatile killed;
+ /* See also thd_killed() */
+ inline bool check_killed()
+ {
+ if (killed)
+ return TRUE;
+ if (apc_target.have_apc_requests())
+ apc_target.process_apc_requests();
+ return FALSE;
+ }
+
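Inside the server the same check is available as THD::check_killed(), which doubles as the service point for pending APC requests such as SHOW EXPLAIN. A minimal sketch of the intended use in a long-running loop; do_one_step() is a hypothetical work unit:

  /* Sketch: cooperative kill/APC check in a long server-side loop. */
  while (do_one_step(thd))
  {
    if (thd->check_killed())    /* also services pending APC requests */
      return true;              /* abort with error */
  }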
/* scramble - random string sent to client on handshake */
char scramble[SCRAMBLE_LENGTH+1];
+ /*
+ If this is a slave, the name of the connection is stored here.
+ This is used for tagging error messages in the log files.
+ */
+ LEX_STRING connection_name;
+ char default_master_connection_buff[MAX_CONNECTION_NAME+1];
bool slave_thread, one_shot_set;
bool extra_port; /* If extra connection */
@@ -2391,10 +2425,21 @@ public:
void close_active_vio();
#endif
void awake(killed_state state_to_set);
-
+
/** Disconnect the associated communication endpoint. */
void disconnect();
+
+ /*
+ Allows this thread to serve as a target for others to schedule Async
+ Procedure Calls on.
+
+ It's possible to schedule any code to be executed this way, by
+ inheriting from the Apc_call object. Currently, only
+ Show_explain_request uses this.
+ */
+ Apc_target apc_target;
+
#ifndef MYSQL_CLIENT
enum enum_binlog_query_type {
/* The query can be logged in row format or in statement format. */
@@ -2588,7 +2633,7 @@ public:
void add_changed_table(const char *key, long key_length);
CHANGED_TABLE_LIST * changed_table_dup(const char *key, long key_length);
int send_explain_fields(select_result *result);
-
+ void make_explain_field_list(List<Item> &field_list);
/**
Clear the current error, if any.
We do not clear is_fatal_error or is_fatal_sub_stmt_error since we
@@ -2738,6 +2783,27 @@ public:
void set_n_backup_active_arena(Query_arena *set, Query_arena *backup);
void restore_active_arena(Query_arena *set, Query_arena *backup);
+ inline void get_binlog_format(enum_binlog_format *format,
+ enum_binlog_format *current_format)
+ {
+ *format= (enum_binlog_format) variables.binlog_format;
+ *current_format= current_stmt_binlog_format;
+ }
+ inline void set_binlog_format(enum_binlog_format format,
+ enum_binlog_format current_format)
+ {
+ DBUG_ENTER("set_binlog_format");
+ variables.binlog_format= format;
+ current_stmt_binlog_format= current_format;
+ DBUG_VOID_RETURN;
+ }
+ inline void set_binlog_format_stmt()
+ {
+ DBUG_ENTER("set_binlog_format_stmt");
+ variables.binlog_format= BINLOG_FORMAT_STMT;
+ current_stmt_binlog_format= BINLOG_FORMAT_STMT;
+ DBUG_VOID_RETURN;
+ }
/*
@todo Make these methods private or remove them completely. Only
decide_logging_format should call them. /Sven
@@ -2768,16 +2834,26 @@ public:
DBUG_VOID_RETURN;
}
+
inline void set_current_stmt_binlog_format_row()
{
DBUG_ENTER("set_current_stmt_binlog_format_row");
current_stmt_binlog_format= BINLOG_FORMAT_ROW;
DBUG_VOID_RETURN;
}
- inline void clear_current_stmt_binlog_format_row()
+ /* Set binlog format temporarily to statement. Returns old format */
+ inline enum_binlog_format set_current_stmt_binlog_format_stmt()
{
- DBUG_ENTER("clear_current_stmt_binlog_format_row");
+ enum_binlog_format orig_format= current_stmt_binlog_format;
+ DBUG_ENTER("set_current_stmt_binlog_format_stmt");
current_stmt_binlog_format= BINLOG_FORMAT_STMT;
+ DBUG_RETURN(orig_format);
+ }
+ inline void restore_stmt_binlog_format(enum_binlog_format format)
+ {
+ DBUG_ENTER("restore_stmt_binlog_format");
+ DBUG_ASSERT(!is_current_stmt_binlog_format_row());
+ current_stmt_binlog_format= format;
DBUG_VOID_RETURN;
}
inline void reset_current_stmt_binlog_format_row()
@@ -2807,7 +2883,7 @@ public:
if (variables.binlog_format == BINLOG_FORMAT_ROW)
set_current_stmt_binlog_format_row();
else if (temporary_tables == NULL)
- clear_current_stmt_binlog_format_row();
+ set_current_stmt_binlog_format_stmt();
}
DBUG_VOID_RETURN;
}
@@ -3211,10 +3287,42 @@ public:
class JOIN;
-class select_result :public Sql_alloc {
+/* Pure interface for sending tabular data */
+class select_result_sink: public Sql_alloc
+{
+public:
+ /*
+ send_data returns 0 on ok, 1 on error and -1 if data was ignored, for
+ example for a duplicate row entry written to a temp table.
+ */
+ virtual int send_data(List<Item> &items)=0;
+ virtual ~select_result_sink() {};
+};
+
+
+/*
+ Interface for sending tabular data, together with some other stuff:
+
+ - Primary purpose seems to be sending typed tabular data:
+ = the DDL is sent with send_fields()
+ = the rows are sent with send_data()
+ Besides that,
+ - there seems to be an assumption that the sent data is a result of
+ SELECT_LEX_UNIT *unit,
+ - nest_level is used by SQL parser
+*/
+
+class select_result :public select_result_sink
+{
protected:
THD *thd;
+ /*
+ All descendant classes have their send_data() skip the first
+ unit->offset_limit_cnt rows sent. Select_materialize
+ also uses unit->get_unit_column_types().
+ */
SELECT_LEX_UNIT *unit;
+ /* Something used only by the parser: */
public:
select_result();
virtual ~select_result() {};
@@ -3232,11 +3340,6 @@ public:
virtual uint field_count(List<Item> &fields) const
{ return fields.elements; }
virtual bool send_result_set_metadata(List<Item> &list, uint flags)=0;
- /*
- send_data returns 0 on ok, 1 on error and -1 if data was ignored, for
- example for a duplicate row entry written to a temp table.
- */
- virtual int send_data(List<Item> &items)=0;
virtual bool initialize_tables (JOIN *join=0) { return 0; }
virtual void send_error(uint errcode,const char *err);
virtual bool send_eof()=0;
@@ -3264,6 +3367,32 @@ public:
/*
+ This is a select_result_sink which simply writes all data into a (temporary)
+ table. Creation/deletion of the table is outside the scope of this class.
+
+ It is aimed at capturing SHOW EXPLAIN output, so:
+ - Unlike the select_result class, we don't assume that the sent data is
+ the output of a SELECT_LEX_UNIT (and so we don't apply "LIMIT x,y" from
+ the unit)
+ - We don't try to convert the target table to MyISAM
+*/
+
+class select_result_explain_buffer : public select_result_sink
+{
+public:
+ select_result_explain_buffer(THD *thd_arg, TABLE *table_arg) :
+ thd(thd_arg), dst_table(table_arg) {};
+
+ THD *thd;
+ TABLE *dst_table; /* table to write into */
+
+ /* The following is called in the child thread: */
+ int send_data(List<Item> &items);
+};
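Consumers that only need rows can now implement the narrow interface. A minimal sketch (hypothetical class) of a sink that counts rows and discards their contents:

  /* Sketch: smallest possible sink; counts rows, ignores the items. */
  class select_result_row_counter : public select_result_sink
  {
  public:
    ha_rows row_count;
    select_result_row_counter() : row_count(0) {}
    int send_data(List<Item> &items) { row_count++; return 0; }
  };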
+
+
+
+/*
Base class for select_result descendants which intercept and
transform result set rows. As the rows are not sent to the client,
sending of result set metadata should be suppressed as well.
@@ -3834,6 +3963,8 @@ class user_var_entry
DTCollation collation;
};
+user_var_entry *get_variable(HASH *hash, LEX_STRING &name,
+ bool create_if_not_exists);
/*
Unique -- class for unique (removing of duplicates).
@@ -4103,6 +4234,21 @@ public:
*/
#define CF_CAN_GENERATE_ROW_EVENTS (1U << 9)
+/**
+ Statement that need the binlog format to be unchanged.
+*/
+#define CF_FORCE_ORIGINAL_BINLOG_FORMAT (1U << 10)
+
+/**
+ Statement that inserts new rows (INSERT, REPLACE, LOAD, ALTER TABLE)
+*/
+#define CF_INSERTS_DATA (1U << 11)
+
+/**
+ Statement that updates existing rows (UPDATE, multi-update)
+*/
+#define CF_UPDATES_DATA (1U << 12)
+
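These flags plug into the existing sql_command_flags[] table that the other CF_* bits already use; a command opts in when the flags array is initialized. A hedged sketch (the assignments are illustrative; the real ones live in sql_parse.cc, outside this excerpt):

  /* Sketch: declaring the new properties for a few commands. */
  sql_command_flags[SQLCOM_INSERT]|= CF_INSERTS_DATA;
  sql_command_flags[SQLCOM_UPDATE]|= CF_UPDATES_DATA;
  sql_command_flags[SQLCOM_CREATE_USER]|= CF_FORCE_ORIGINAL_BINLOG_FORMAT;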
/* Bits in server_command_flags */
/**
diff --git a/sql/sql_const.h b/sql/sql_const.h
index de2704a8553..c6aa52197d5 100644
--- a/sql/sql_const.h
+++ b/sql/sql_const.h
@@ -38,6 +38,7 @@
#define MAX_REFLENGTH 4 /* Max length for record ref */
#endif
#define MAX_HOSTNAME 61 /* len+1 in mysql.user */
+#define MAX_CONNECTION_NAME NAME_LEN
#define MAX_MBWIDTH 3 /* Max multibyte sequence */
#define MAX_FIELD_CHARLENGTH 255
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index cbe59503058..87650e643f2 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -37,6 +37,7 @@
#include "sp.h"
#include "events.h"
#include "sql_handler.h"
+#include "sql_statistics.h"
#include <my_dir.h>
#include <m_ctype.h>
#include "log.h"
@@ -817,6 +818,17 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent)
lock_db_routines(thd, db))
goto exit;
+ if (!in_bootstrap)
+ {
+ for (table= tables; table; table= table->next_local)
+ {
+ LEX_STRING db_name= { table->db, table->db_length };
+ LEX_STRING table_name= { table->table_name, table->table_name_length };
+ if (table->open_type == OT_BASE_ONLY || !find_temporary_table(thd, table))
+ (void) delete_statistics_for_table(thd, &db_name, &table_name);
+ }
+ }
+
/* mysql_ha_rm_tables() requires a non-null TABLE_LIST. */
if (tables)
mysql_ha_rm_tables(thd, tables);
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 3c245807b47..4bcb62ef764 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -35,6 +35,7 @@
#include "sql_select.h"
#include "sp_head.h"
#include "sql_trigger.h"
+#include "sql_statistics.h"
#include "transaction.h"
#include "records.h" // init_read_record,
#include "sql_derived.h" // mysql_handle_list_of_derived
@@ -200,6 +201,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
#endif
/* Update the table->file->stats.records number */
table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
+ set_statistics_for_table(thd, table);
table->covering_keys.clear_all();
table->quick_keys.clear_all(); // Can't use 'only index'
@@ -244,6 +246,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
uint length= 0;
SORT_FIELD *sortorder;
ha_rows examined_rows;
+ ha_rows found_rows;
table->update_const_key_parts(conds);
order= simple_remove_const(order, conds);
@@ -261,12 +264,14 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
{
DBUG_ASSERT(usable_index == MAX_KEY);
table->sort.io_cache= (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
- MYF(MY_FAE | MY_ZEROFILL));
+ MYF(MY_FAE | MY_ZEROFILL |
+ MY_THREAD_SPECIFIC));
if (!(sortorder= make_unireg_sortorder(order, &length, NULL)) ||
- (table->sort.found_records = filesort(thd, table, sortorder, length,
- select, HA_POS_ERROR, 1,
- &examined_rows))
+ (table->sort.found_records= filesort(thd, table, sortorder, length,
+ select, HA_POS_ERROR,
+ true,
+ &examined_rows, &found_rows))
== HA_POS_ERROR)
{
delete select;
diff --git a/sql/sql_error.cc b/sql/sql_error.cc
index 06da6250e71..23a60267737 100644
--- a/sql/sql_error.cc
+++ b/sql/sql_error.cc
@@ -457,23 +457,38 @@ Diagnostics_area::disable_status()
m_status= DA_DISABLED;
}
-Warning_info::Warning_info(ulonglong warn_id_arg, bool allow_unlimited_warnings)
+Warning_info::Warning_info(ulonglong warn_id_arg,
+ bool allow_unlimited_warnings, bool initialize)
:m_statement_warn_count(0),
m_current_row_for_warning(1),
m_warn_id(warn_id_arg),
m_allow_unlimited_warnings(allow_unlimited_warnings),
+ initialized(0),
m_read_only(FALSE)
{
- /* Initialize sub structures */
- init_sql_alloc(&m_warn_root, WARN_ALLOC_BLOCK_SIZE, WARN_ALLOC_PREALLOC_SIZE);
m_warn_list.empty();
bzero((char*) m_warn_count, sizeof(m_warn_count));
+ if (initialize)
+ init();
}
+void Warning_info::init()
+{
+ /* Initialize sub structures */
+ init_sql_alloc(&m_warn_root, WARN_ALLOC_BLOCK_SIZE,
+ WARN_ALLOC_PREALLOC_SIZE, MYF(MY_THREAD_SPECIFIC));
+ initialized= 1;
+}
+
+void Warning_info::free_memory()
+{
+ if (initialized)
+ free_root(&m_warn_root,MYF(0));
+}
Warning_info::~Warning_info()
{
- free_root(&m_warn_root,MYF(0));
+ free_memory();
}
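The two-phase construction exists so that a Warning_info embedded in a THD can postpone its MY_THREAD_SPECIFIC allocation until THR_THD points at the owning THD; otherwise the memory would be charged to whatever thread happened to be current. A condensed sketch of the pattern the THD constructor above uses ('owner' is an illustrative THD pointer):

  /* Sketch: construct without allocating, init() after THR_THD is set. */
  Warning_info wi(0, false, false);  /* m_warn_root not yet allocated */
  set_current_thd(owner);            /* allocations now charged to owner */
  wi.init();                         /* allocate m_warn_root, thread-specific */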
@@ -484,7 +499,7 @@ Warning_info::~Warning_info()
void Warning_info::clear_warning_info(ulonglong warn_id_arg)
{
m_warn_id= warn_id_arg;
- free_root(&m_warn_root, MYF(0));
+ free_memory();
bzero((char*) m_warn_count, sizeof(m_warn_count));
m_warn_list.empty();
m_statement_warn_count= 0;
diff --git a/sql/sql_error.h b/sql/sql_error.h
index 00ade934226..f018387eb3b 100644
--- a/sql/sql_error.h
+++ b/sql/sql_error.h
@@ -355,15 +355,21 @@ class Warning_info
/** Indicates if push_warning() allows unlimited number of warnings. */
bool m_allow_unlimited_warnings;
+ bool initialized; /* Set to 1 if init() has been called */
private:
Warning_info(const Warning_info &rhs); /* Not implemented */
Warning_info& operator=(const Warning_info &rhs); /* Not implemented */
public:
- Warning_info(ulonglong warn_id_arg, bool allow_unlimited_warnings);
+ Warning_info(ulonglong warn_id_arg, bool allow_unlimited_warnings,
+ bool initialize=true);
~Warning_info();
+ /* Allocate memory for structures */
+ void init();
+ void free_memory();
+
/**
Reset the warning information. Clear all warnings,
the number of warnings, reset current row counter
diff --git a/sql/sql_expression_cache.cc b/sql/sql_expression_cache.cc
index e65ec3c22b0..1193c7c27f4 100644
--- a/sql/sql_expression_cache.cc
+++ b/sql/sql_expression_cache.cc
@@ -257,7 +257,7 @@ my_bool Expression_cache_tmptable::put_value(Item *value)
}
*(items.head_ref())= value;
- fill_record(table_thd, cache_table->field, items, TRUE, TRUE);
+ fill_record(table_thd, cache_table, cache_table->field, items, TRUE, TRUE);
if (table_thd->is_error())
goto err;;
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index d779f679021..1c93a59904c 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -323,7 +323,7 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, SQL_HANDLER *reopen)
/* copy data to sql_handler */
if (!(sql_handler= new SQL_HANDLER(thd)))
goto err;
- init_alloc_root(&sql_handler->mem_root, 1024, 0);
+ init_alloc_root(&sql_handler->mem_root, 1024, 0, MYF(MY_THREAD_SPECIFIC));
sql_handler->db.length= strlen(tables->db);
sql_handler->table_name.length= strlen(tables->table_name);
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index fc6151e44d9..833e134bff9 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1,5 +1,6 @@
/*
Copyright (c) 2000, 2011, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -191,11 +192,6 @@ error:
different table maps, like on select ... insert
map Store here table map for used fields
- NOTE
- Clears TIMESTAMP_AUTO_SET_ON_INSERT from table->timestamp_field_type
- or leaves it as is, depending on if timestamp should be updated or
- not.
-
RETURN
0 OK
-1 Error
@@ -234,8 +230,6 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
if (check_grant_all_columns(thd, INSERT_ACL, &field_it))
return -1;
#endif
- clear_timestamp_auto_bits(table->timestamp_field_type,
- TIMESTAMP_AUTO_SET_ON_INSERT);
/*
No fields are provided so all fields must be provided in the values.
Thus we set all bits in the write set.
@@ -295,18 +289,8 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
my_error(ER_FIELD_SPECIFIED_TWICE, MYF(0), thd->dup_field->field_name);
return -1;
}
- if (table->timestamp_field) // Don't automaticly set timestamp if used
- {
- if (bitmap_is_set(table->write_set,
- table->timestamp_field->field_index))
- clear_timestamp_auto_bits(table->timestamp_field_type,
- TIMESTAMP_AUTO_SET_ON_INSERT);
- else
- {
- bitmap_set_bit(table->write_set,
- table->timestamp_field->field_index);
- }
- }
+ if (table->default_field)
+ table->mark_default_fields_for_write();
}
/* Mark virtual columns used in the insert statement */
if (table->vfield)
@@ -339,9 +323,6 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
update_fields The update fields.
NOTE
- If the update fields include the timestamp field,
- remove TIMESTAMP_AUTO_SET_ON_UPDATE from table->timestamp_field_type.
-
If the update fields include an autoinc field, set the
table->next_number_field_updated flag.
@@ -355,21 +336,9 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
List<Item> &update_values, table_map *map)
{
TABLE *table= insert_table_list->table;
- my_bool timestamp_mark;
my_bool autoinc_mark;
- LINT_INIT(timestamp_mark);
LINT_INIT(autoinc_mark);
- if (table->timestamp_field)
- {
- /*
- Unmark the timestamp field so that we can check if this is modified
- by update_fields
- */
- timestamp_mark= bitmap_test_and_clear(table->write_set,
- table->timestamp_field->field_index);
- }
-
table->next_number_field_updated= FALSE;
if (table->found_next_number_field)
@@ -393,17 +362,8 @@ static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list,
insert_table_list, map, false))
return -1;
- if (table->timestamp_field)
- {
- /* Don't set timestamp column if this is modified. */
- if (bitmap_is_set(table->write_set,
- table->timestamp_field->field_index))
- clear_timestamp_auto_bits(table->timestamp_field_type,
- TIMESTAMP_AUTO_SET_ON_UPDATE);
- if (timestamp_mark)
- bitmap_set_bit(table->write_set,
- table->timestamp_field->field_index);
- }
+ if (table->default_field)
+ table->mark_default_fields_for_write();
if (table->found_next_number_field)
{
@@ -709,7 +669,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
int error, res;
bool transactional_table, joins_freed= FALSE;
bool changed;
- bool was_insert_delayed= (table_list->lock_type == TL_WRITE_DELAYED);
+ const bool was_insert_delayed= (table_list->lock_type == TL_WRITE_DELAYED);
bool using_bulk_insert= 0;
uint value_count;
ulong counter = 1;
@@ -850,10 +810,10 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
table->next_number_field=table->found_next_number_field;
#ifdef HAVE_REPLICATION
- if (thd->slave_thread &&
+ if (thd->rli_slave &&
(info.handle_duplicates == DUP_UPDATE) &&
(table->next_number_field != NULL) &&
- rpl_master_has_bug(&active_mi->rli, 24432, TRUE, NULL, NULL))
+ rpl_master_has_bug(thd->rli_slave, 24432, TRUE, NULL, NULL))
goto abort;
#endif
@@ -913,8 +873,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
if (fields.elements || !value_count)
{
restore_record(table,s->default_values); // Get empty record
- if (fill_record_n_invoke_before_triggers(thd, fields, *values, 0,
- table->triggers,
+ if (fill_record_n_invoke_before_triggers(thd, table, fields, *values, 0,
TRG_EVENT_INSERT))
{
if (values_list.elements != 1 && ! thd->is_error())
@@ -958,8 +917,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
share->default_values[share->null_bytes - 1];
}
}
- if (fill_record_n_invoke_before_triggers(thd, table->field, *values, 0,
- table->triggers,
+ if (fill_record_n_invoke_before_triggers(thd, table, table->field, *values, 0,
TRG_EVENT_INSERT))
{
if (values_list.elements != 1 && ! thd->is_error())
@@ -971,6 +929,11 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
break;
}
}
+ if (table->default_field && table->update_default_fields())
+ {
+ error= 1;
+ break;
+ }
if ((res= table_list->view_check_option(thd,
(values_list.elements == 1 ?
@@ -987,7 +950,9 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
if (lock_type == TL_WRITE_DELAYED)
{
LEX_STRING const st_query = { query, thd->query_length() };
+ DEBUG_SYNC(thd, "before_write_delayed");
error=write_delayed(thd, table, duplic, st_query, ignore, log_on);
+ DEBUG_SYNC(thd, "after_write_delayed");
query=0;
}
else
@@ -1697,13 +1662,32 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
restore_record(table,record[1]);
DBUG_ASSERT(info->update_fields->elements ==
info->update_values->elements);
- if (fill_record_n_invoke_before_triggers(thd, *info->update_fields,
+ if (fill_record_n_invoke_before_triggers(thd, table, *info->update_fields,
*info->update_values,
info->ignore,
- table->triggers,
TRG_EVENT_UPDATE))
goto before_trg_err;
+ bool different_records= (!records_are_comparable(table) ||
+ compare_record(table));
+ /*
+ Default fields must be updated before checking view updateability.
+ This branch of INSERT is executed only when a UNIQUE key was violated
+ with the ON DUPLICATE KEY UPDATE option. In this case the INSERT
+ operation is transformed to an UPDATE, and the default fields must
+ be updated as if this is an UPDATE.
+ */
+ if (different_records && table->default_field)
+ {
+ bool res;
+ enum_sql_command cmd= thd->lex->sql_command;
+ thd->lex->sql_command= SQLCOM_UPDATE;
+ res= table->update_default_fields();
+ thd->lex->sql_command= cmd;
+ if (res)
+ goto err;
+ }
+
/* CHECK OPTION for VIEW ... ON DUPLICATE KEY UPDATE ... */
if (info->view &&
(res= info->view->view_check_option(current_thd, info->ignore)) ==
@@ -1714,7 +1698,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
table->file->restore_auto_increment(prev_insert_id);
info->touched++;
- if (!records_are_comparable(table) || compare_record(table))
+ if (different_records)
{
if ((error=table->file->ha_update_row(table->record[1],
table->record[0])) &&
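The hunk above briefly rewrites thd->lex->sql_command to SQLCOM_UPDATE so that ON UPDATE default functions fire for the row that the duplicate-key INSERT turns into an UPDATE, then restores the old command. A minimal standalone sketch of that save-override-restore pattern follows; Ctx and update_default_fields() are illustrative stand-ins, not the server's real declarations.

#include <cassert>

// Illustrative subset of the command enum; stand-ins, not sql_lex.h.
enum enum_sql_command { SQLCOM_INSERT, SQLCOM_UPDATE };

struct Ctx { enum_sql_command sql_command; };

// Stand-in for TABLE::update_default_fields(): pretend ON UPDATE defaults
// may only fire while the context claims an UPDATE is running.
static bool update_default_fields(const Ctx &ctx)
{
  return ctx.sql_command != SQLCOM_UPDATE;   // true means error here
}

int main()
{
  Ctx ctx = { SQLCOM_INSERT };               // INSERT ... ON DUPLICATE KEY UPDATE
  enum_sql_command saved = ctx.sql_command;  // save the current command
  ctx.sql_command = SQLCOM_UPDATE;           // act as an UPDATE for defaults
  bool res = update_default_fields(ctx);     // evaluate ON UPDATE defaults
  ctx.sql_command = saved;                   // restore before error handling
  assert(!res && ctx.sql_command == SQLCOM_INSERT);
  return 0;
}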
@@ -1785,8 +1769,6 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
*/
if (last_uniq_key(table,key_nr) &&
!table->file->referenced_by_foreign_key() &&
- (table->timestamp_field_type == TIMESTAMP_NO_AUTO_SET ||
- table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH) &&
(!table->triggers || !table->triggers->has_delete_triggers()))
{
if ((error=table->file->ha_update_row(table->record[1],
@@ -1949,7 +1931,6 @@ public:
ulonglong forced_insert_id;
ulong auto_increment_increment;
ulong auto_increment_offset;
- timestamp_auto_set_type timestamp_field_type;
LEX_STRING query;
Time_zone *time_zone;
@@ -2320,7 +2301,7 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
{
my_ptrdiff_t adjust_ptrs;
Field **field,**org_field, *found_next_number_field;
- Field **UNINIT_VAR(vfield);
+ Field **UNINIT_VAR(vfield), **UNINIT_VAR(dfield_ptr);
TABLE *copy;
TABLE_SHARE *share;
uchar *bitmap;
@@ -2396,6 +2377,14 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
bitmap= (uchar*) (field + share->fields + 1);
copy->record[0]= (bitmap + share->column_bitmap_size*3);
memcpy((char*) copy->record[0], (char*) table->record[0], share->reclength);
+ if (share->default_fields)
+ {
+ copy->default_field= (Field**) client_thd->alloc((share->default_fields+1)*
+ sizeof(Field**));
+ if (!copy->default_field)
+ goto error;
+ dfield_ptr= copy->default_field;
+ }
/*
Make a copy of all fields.
The copied fields need to point into the copied record. This is done
@@ -2413,6 +2402,15 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
(*field)->move_field_offset(adjust_ptrs); // Point at copy->record[0]
if (*org_field == found_next_number_field)
(*field)->table->found_next_number_field= *field;
+ if (share->default_fields &&
+ ((*org_field)->has_insert_default_function() ||
+ (*org_field)->has_update_default_function()))
+ {
+ /* Put the newly copied field into the set of default fields. */
+ *dfield_ptr= *field;
+ (*dfield_ptr)->unireg_check= (*org_field)->unireg_check;
+ dfield_ptr++;
+ }
}
*field=0;
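The delayed-insert copy now has to carry the default-function fields over to the thread-local TABLE as well. A standalone sketch of the collection pattern this hunk uses, with a toy Field type in place of the server's: allocate count+1 slots, append every field that has an insert or update default function, and NULL-terminate.

#include <cstdio>
#include <cstdlib>

// Toy stand-in for Field, reduced to the two predicates the loop checks.
struct Field {
  bool has_insert_default_fn;
  bool has_update_default_fn;
};

int main()
{
  Field fields[4] = {{false,false},{true,false},{false,true},{false,false}};
  const size_t n_fields = 4, n_default = 2;  // n_default is known up front,
                                             // like share->default_fields
  // One extra slot for the NULL terminator, as in the hunk above.
  Field **default_field = (Field **) malloc((n_default + 1) * sizeof(Field *));
  if (!default_field)
    return 1;
  Field **dfield_ptr = default_field;
  for (size_t i = 0; i < n_fields; i++)
    if (fields[i].has_insert_default_fn || fields[i].has_update_default_fn)
      *dfield_ptr++ = &fields[i];            // collect the copied field
  *dfield_ptr = NULL;                        // terminate the array
  for (Field **f = default_field; *f; f++)
    printf("default field at index %td\n", *f - fields);
  free(default_field);
  return 0;
}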
@@ -2437,15 +2435,8 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
*vfield= 0;
}
- /* Adjust timestamp */
- if (table->timestamp_field)
- {
- /* Restore offset as this may have been reset in handle_inserts */
- copy->timestamp_field=
- (Field_timestamp*) copy->field[share->timestamp_field_offset];
- copy->timestamp_field->unireg_check= table->timestamp_field->unireg_check;
- copy->timestamp_field_type= copy->timestamp_field->get_auto_set_type();
- }
+ if (share->default_fields)
+ *dfield_ptr= NULL;
/* Adjust in_use for pointing to client thread */
copy->in_use= client_thd;
@@ -2518,7 +2509,9 @@ int write_delayed(THD *thd, TABLE *table, enum_duplicates duplic,
goto err;
}
- if (!(row->record= (char*) my_malloc(table->s->reclength, MYF(MY_WME))))
+ /* This can't be THREAD_SPECIFIC as it's freed in delayed thread */
+ if (!(row->record= (char*) my_malloc(table->s->reclength,
+ MYF(MY_WME))))
goto err;
memcpy(row->record, table->record[0], table->s->reclength);
row->start_time= thd->start_time;
@@ -2535,7 +2528,6 @@ int write_delayed(THD *thd, TABLE *table, enum_duplicates duplic,
thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt;
row->first_successful_insert_id_in_prev_stmt=
thd->first_successful_insert_id_in_prev_stmt;
- row->timestamp_field_type= table->timestamp_field_type;
/* Add session variable timezone
Time_zone object will not be freed even the thread is ended.
@@ -2943,24 +2935,28 @@ pthread_handler_t handle_delayed_insert(void *arg)
DBUG_LEAVE;
}
- di->table=0;
- thd->killed= KILL_CONNECTION; // If error
- mysql_mutex_unlock(&di->mutex);
+ {
+ DBUG_ENTER("handle_delayed_insert-cleanup");
+ di->table=0;
+ thd->killed= KILL_CONNECTION; // If error
+ mysql_mutex_unlock(&di->mutex);
- close_thread_tables(thd); // Free the table
- thd->mdl_context.release_transactional_locks();
- mysql_cond_broadcast(&di->cond_client); // Safety
+ close_thread_tables(thd); // Free the table
+ thd->mdl_context.release_transactional_locks();
+ mysql_cond_broadcast(&di->cond_client); // Safety
- mysql_mutex_lock(&LOCK_delayed_create); // Because of delayed_get_table
- mysql_mutex_lock(&LOCK_delayed_insert);
- /*
- di should be unlinked from the thread handler list and have no active
- clients
- */
- delete di;
- mysql_mutex_unlock(&LOCK_delayed_insert);
- mysql_mutex_unlock(&LOCK_delayed_create);
+ mysql_mutex_lock(&LOCK_delayed_create); // Because of delayed_get_table
+ mysql_mutex_lock(&LOCK_delayed_insert);
+ /*
+ di should be unlinked from the thread handler list and have no active
+ clients
+ */
+ delete di;
+ mysql_mutex_unlock(&LOCK_delayed_insert);
+ mysql_mutex_unlock(&LOCK_delayed_create);
+ DBUG_LEAVE;
+ }
my_thread_end();
pthread_exit(0);
@@ -3085,7 +3081,6 @@ bool Delayed_insert::handle_inserts(void)
row->first_successful_insert_id_in_prev_stmt;
thd.stmt_depends_on_first_successful_insert_id_in_prev_stmt=
row->stmt_depends_on_first_successful_insert_id_in_prev_stmt;
- table->timestamp_field_type= row->timestamp_field_type;
table->auto_increment_field_not_null= row->auto_increment_field_not_null;
/* Copy the session variables. */
@@ -3470,10 +3465,10 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
table->next_number_field=table->found_next_number_field;
#ifdef HAVE_REPLICATION
- if (thd->slave_thread &&
+ if (thd->rli_slave &&
(info.handle_duplicates == DUP_UPDATE) &&
(table->next_number_field != NULL) &&
- rpl_master_has_bug(&active_mi->rli, 24432, TRUE, NULL, NULL))
+ rpl_master_has_bug(thd->rli_slave, 24432, TRUE, NULL, NULL))
DBUG_RETURN(1);
#endif
@@ -3561,6 +3556,8 @@ int select_insert::send_data(List<Item> &values)
thd->count_cuted_fields= CHECK_FIELD_WARN; // Calculate cuted fields
store_values(values);
+ if (table->default_field && table->update_default_fields())
+ DBUG_RETURN(1);
thd->count_cuted_fields= CHECK_FIELD_ERROR_FOR_NULL;
if (thd->is_error())
{
@@ -3620,11 +3617,11 @@ int select_insert::send_data(List<Item> &values)
void select_insert::store_values(List<Item> &values)
{
if (fields->elements)
- fill_record_n_invoke_before_triggers(thd, *fields, values, 1,
- table->triggers, TRG_EVENT_INSERT);
+ fill_record_n_invoke_before_triggers(thd, table, *fields, values, 1,
+ TRG_EVENT_INSERT);
else
- fill_record_n_invoke_before_triggers(thd, table->field, values, 1,
- table->triggers, TRG_EVENT_INSERT);
+ fill_record_n_invoke_before_triggers(thd, table, table->field, values, 1,
+ TRG_EVENT_INSERT);
}
void select_insert::send_error(uint errcode,const char *err)
@@ -3842,7 +3839,6 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
DBUG_ENTER("create_table_from_items");
tmp_table.alias= 0;
- tmp_table.timestamp_field= 0;
tmp_table.s= &share;
init_tmp_table_share(thd, &share, "", 0, "", "");
@@ -3851,6 +3847,8 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
tmp_table.null_row= 0;
tmp_table.maybe_null= 0;
+ promote_first_timestamp_column(&alter_info->create_list);
+
while ((item=it++))
{
Create_field *cr_field;
@@ -4090,8 +4088,6 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
for (Field **f= field ; *f ; f++)
bitmap_set_bit(table->write_set, (*f)->field_index);
- /* Don't set timestamp if used */
- table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
table->next_number_field=table->found_next_number_field;
restore_record(table,s->default_values); // Get empty record
@@ -4167,8 +4163,8 @@ select_create::binlog_show_create_table(TABLE **tables, uint count)
void select_create::store_values(List<Item> &values)
{
- fill_record_n_invoke_before_triggers(thd, field, values, 1,
- table->triggers, TRG_EVENT_INSERT);
+ fill_record_n_invoke_before_triggers(thd, table, field, values, 1,
+ TRG_EVENT_INSERT);
}
@@ -4277,16 +4273,3 @@ void select_create::abort_result_set()
DBUG_VOID_RETURN;
}
-
-/*****************************************************************************
- Instansiate templates
-*****************************************************************************/
-
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-template class List_iterator_fast<List_item>;
-#ifndef EMBEDDED_LIBRARY
-template class I_List<Delayed_insert>;
-template class I_List_iterator<Delayed_insert>;
-template class I_List<delayed_row>;
-#endif /* EMBEDDED_LIBRARY */
-#endif /* HAVE_EXPLICIT_TEMPLATE_INSTANTIATION */
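Taken together, the sql_insert.cc hunks replace the TIMESTAMP_AUTO_SET_ON_* bookkeeping with the generic default_field machinery: instead of toggling bits for one special timestamp column, every column with a default function is marked for write and later filled in by update_default_fields(). A simplified standalone model of the marking step follows; the Column type and the inline loop are illustrative, not the server's mark_default_fields_for_write().

#include <bitset>
#include <cstdio>

// Toy column descriptor; has_default_fn stands for "has an insert/update
// default function", the property that used to be special-cased for the
// single TIMESTAMP column.
struct Column { const char *name; bool has_default_fn; };

int main()
{
  Column cols[3] = { {"id", false}, {"ts", true}, {"payload", false} };
  std::bitset<3> write_set;
  write_set.set(2);                    // the statement assigns 'payload'
  // Simplified marking: any column with a default function joins the
  // write set so its function can fill it in later.
  for (int i = 0; i < 3; i++)
    if (cols[i].has_default_fn && !write_set.test(i))
      write_set.set(i);
  for (int i = 0; i < 3; i++)
    printf("%-8s %s\n", cols[i].name, write_set.test(i) ? "write" : "skip");
  return 0;
}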
diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc
index 8424dd00d93..fde9f70fa79 100644
--- a/sql/sql_join_cache.cc
+++ b/sql/sql_join_cache.cc
@@ -934,7 +934,7 @@ int JOIN_CACHE::alloc_buffer()
{
ulong next_buff_size;
- if ((buff= (uchar*) my_malloc(buff_size, MYF(0))))
+ if ((buff= (uchar*) my_malloc(buff_size, MYF(MY_THREAD_SPECIFIC))))
break;
next_buff_size= buff_size > buff_size_decr ? buff_size-buff_size_decr : 0;
@@ -1012,7 +1012,7 @@ int JOIN_CACHE::realloc_buffer()
{
int rc;
free();
- rc= test(!(buff= (uchar*) my_malloc(buff_size, MYF(0))));
+ rc= test(!(buff= (uchar*) my_malloc(buff_size, MYF(MY_THREAD_SPECIFIC))));
reset(TRUE);
return rc;
}
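The MYF(MY_THREAD_SPECIFIC) change only swaps the malloc flags; the surrounding allocation strategy is the shrink-and-retry loop shown in the first hunk of this file. A standalone sketch of that loop, using plain malloc since my_malloc and its flags are server internals:

#include <cstdio>
#include <cstdlib>

// Model of JOIN_CACHE::alloc_buffer()'s retry loop: on allocation failure,
// shrink the request by a fixed decrement and retry until malloc succeeds
// or the size reaches zero.
int main()
{
  size_t buff_size = 1UL << 20;            // initial request
  const size_t buff_size_decr = 1UL << 18; // shrink step
  unsigned char *buff = NULL;
  while (buff_size)
  {
    if ((buff = (unsigned char *) malloc(buff_size)))
      break;                               // got a (possibly smaller) buffer
    buff_size = buff_size > buff_size_decr ? buff_size - buff_size_decr : 0;
  }
  if (buff)
    printf("allocated %zu bytes\n", buff_size);
  else
    printf("gave up\n");
  free(buff);
  return 0;
}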
@@ -2257,7 +2257,7 @@ enum_nested_loop_state JOIN_CACHE::join_matching_records(bool skip_last)
while (!(error= join_tab_scan->next()))
{
- if (join->thd->killed)
+ if (join->thd->check_killed())
{
/* The user has aborted the execution of the query */
join->thd->send_kill_message();
@@ -2527,7 +2527,7 @@ enum_nested_loop_state JOIN_CACHE::join_null_complements(bool skip_last)
for ( ; cnt; cnt--)
{
- if (join->thd->killed)
+ if (join->thd->check_killed())
{
/* The user has aborted the execution of the query */
join->thd->send_kill_message();
@@ -2801,7 +2801,7 @@ int JOIN_CACHE_HASHED::realloc_buffer()
{
int rc;
free();
- rc= test(!(buff= (uchar*) my_malloc(buff_size, MYF(0))));
+ rc= test(!(buff= (uchar*) my_malloc(buff_size, MYF(MY_THREAD_SPECIFIC))));
init_hash_table();
reset(TRUE);
return rc;
@@ -3377,7 +3377,7 @@ int JOIN_TAB_SCAN::next()
update_virtual_fields(thd, table);
while (!err && select && (skip_rc= select->skip_record(thd)) <= 0)
{
- if (thd->killed || skip_rc < 0)
+ if (thd->check_killed() || skip_rc < 0)
return 1;
/*
Move to the next record if the last retrieved record does not
@@ -3812,7 +3812,8 @@ uint JOIN_TAB_SCAN_MRR::aux_buffer_incr(ulong recno)
uint incr= 0;
TABLE_REF *ref= &join_tab->ref;
TABLE *tab= join_tab->table;
- uint rec_per_key= tab->key_info[ref->key].rec_per_key[ref->key_parts-1];
+ uint rec_per_key=
+ tab->key_info[ref->key].actual_rec_per_key(ref->key_parts-1);
set_if_bigger(rec_per_key, 1);
if (recno == 1)
incr= ref->key_length + tab->file->ref_length;
@@ -3893,13 +3894,15 @@ int JOIN_TAB_SCAN_MRR::next()
int rc= join_tab->table->file->multi_range_read_next((range_id_t*)ptr) ? -1 : 0;
if (!rc)
{
- /*
+ /*
If a record in an incremental cache contains no fields then the
association for the last record in cache will be equal to cache->end_pos
- */
- DBUG_ASSERT((!(mrr_mode & HA_MRR_NO_ASSOCIATION))?
- (cache->buff <= (uchar *) (*ptr) &&
- (uchar *) (*ptr) <= cache->end_pos): TRUE);
+ */
+ /*
+ psergey: this makes no sense where HA_MRR_NO_ASSOC is used.
+ DBUG_ASSERT(cache->buff <= (uchar *) (*ptr) &&
+ (uchar *) (*ptr) <= cache->end_pos);
+ */
if (join_tab->table->vfield)
update_virtual_fields(join->thd, join_tab->table);
}
@@ -3909,663 +3912,663 @@ int JOIN_TAB_SCAN_MRR::next()
static
void bka_range_seq_key_info(void *init_params, uint *length,
- key_part_map *map)
+ key_part_map *map)
{
- TABLE_REF *ref= &(((JOIN_CACHE*)init_params)->join_tab->ref);
- *length= ref->key_length;
- *map= (key_part_map(1) << ref->key_parts) - 1;
+TABLE_REF *ref= &(((JOIN_CACHE*)init_params)->join_tab->ref);
+*length= ref->key_length;
+*map= (key_part_map(1) << ref->key_parts) - 1;
}
/*
- Initialize retrieval of range sequence for BKA join algorithm
-
- SYNOPSIS
- bka_range_seq_init()
- init_params pointer to the BKA join cache object
- n_ranges the number of ranges obtained
- flags combination of MRR flags
-
- DESCRIPTION
- The function interprets init_param as a pointer to a JOIN_CACHE_BKA
- object. The function prepares for an iteration over the join keys
- built for all records from the cache join buffer.
-
- NOTE
- This function are used only as a callback function.
-
- RETURN VALUE
- init_param value that is to be used as a parameter of bka_range_seq_next()
+Initialize retrieval of range sequence for BKA join algorithm
+
+SYNOPSIS
+ bka_range_seq_init()
+ init_params pointer to the BKA join cache object
+ n_ranges the number of ranges obtained
+ flags combination of MRR flags
+
+DESCRIPTION
+ The function interprets init_param as a pointer to a JOIN_CACHE_BKA
+ object. The function prepares for an iteration over the join keys
+ built for all records from the cache join buffer.
+
+NOTE
+ This function is used only as a callback function.
+
+RETURN VALUE
+ init_param value that is to be used as a parameter of bka_range_seq_next()
*/
static
range_seq_t bka_range_seq_init(void *init_param, uint n_ranges, uint flags)
{
- DBUG_ENTER("bka_range_seq_init");
- JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) init_param;
- cache->reset(0);
- DBUG_RETURN((range_seq_t) init_param);
+DBUG_ENTER("bka_range_seq_init");
+JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) init_param;
+cache->reset(0);
+DBUG_RETURN((range_seq_t) init_param);
}
/*
- Get the next range/key over records from the join buffer used by a BKA cache
-
- SYNOPSIS
- bka_range_seq_next()
- seq the value returned by bka_range_seq_init
- range OUT reference to the next range
+Get the next range/key over records from the join buffer used by a BKA cache
- DESCRIPTION
- The function interprets seq as a pointer to a JOIN_CACHE_BKA
- object. The function returns a pointer to the range descriptor
- for the key built over the next record from the join buffer.
-
- NOTE
- This function are used only as a callback function.
-
- RETURN VALUE
- FALSE ok, the range structure filled with info about the next range/key
- TRUE no more ranges
+SYNOPSIS
+ bka_range_seq_next()
+ seq the value returned by bka_range_seq_init
+ range OUT reference to the next range
+
+DESCRIPTION
+ The function interprets seq as a pointer to a JOIN_CACHE_BKA
+ object. The function returns a pointer to the range descriptor
+ for the key built over the next record from the join buffer.
+
+NOTE
+ This function is used only as a callback function.
+
+RETURN VALUE
+ FALSE ok, the range structure filled with info about the next range/key
+ TRUE no more ranges
*/
static
bool bka_range_seq_next(range_seq_t rseq, KEY_MULTI_RANGE *range)
{
- DBUG_ENTER("bka_range_seq_next");
- JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
- TABLE_REF *ref= &cache->join_tab->ref;
- key_range *start_key= &range->start_key;
- if ((start_key->length= cache->get_next_key((uchar **) &start_key->key)))
- {
- start_key->keypart_map= (1 << ref->key_parts) - 1;
- start_key->flag= HA_READ_KEY_EXACT;
- range->end_key= *start_key;
- range->end_key.flag= HA_READ_AFTER_KEY;
- range->ptr= (char *) cache->get_curr_rec();
- range->range_flag= EQ_RANGE;
- DBUG_RETURN(0);
- }
- DBUG_RETURN(1);
+DBUG_ENTER("bka_range_seq_next");
+JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
+TABLE_REF *ref= &cache->join_tab->ref;
+key_range *start_key= &range->start_key;
+if ((start_key->length= cache->get_next_key((uchar **) &start_key->key)))
+{
+ start_key->keypart_map= (1 << ref->key_parts) - 1;
+ start_key->flag= HA_READ_KEY_EXACT;
+ range->end_key= *start_key;
+ range->end_key.flag= HA_READ_AFTER_KEY;
+ range->ptr= (char *) cache->get_curr_rec();
+ range->range_flag= EQ_RANGE;
+ DBUG_RETURN(0);
+}
+DBUG_RETURN(1);
}
/*
- Check whether range_info orders to skip the next record from BKA buffer
-
- SYNOPSIS
- bka_range_seq_skip_record()
- seq value returned by bka_range_seq_init()
- range_info information about the next range
- rowid [NOT USED] rowid of the record to be checked
+Check whether range_info orders to skip the next record from BKA buffer
-
- DESCRIPTION
- The function interprets seq as a pointer to a JOIN_CACHE_BKA object.
- The function returns TRUE if the record with this range_info
- is to be filtered out from the stream of records returned by
- multi_range_read_next().
-
- NOTE
- This function are used only as a callback function.
+SYNOPSIS
+ bka_range_seq_skip_record()
+ seq value returned by bka_range_seq_init()
+ range_info information about the next range
+ rowid [NOT USED] rowid of the record to be checked
- RETURN VALUE
- 1 record with this range_info is to be filtered out from the stream
- of records returned by multi_range_read_next()
- 0 the record is to be left in the stream
+
+DESCRIPTION
+ The function interprets seq as a pointer to a JOIN_CACHE_BKA object.
+ The function returns TRUE if the record with this range_info
+ is to be filtered out from the stream of records returned by
+ multi_range_read_next().
+
+NOTE
+ This function is used only as a callback function.
+
+RETURN VALUE
+ 1 record with this range_info is to be filtered out from the stream
+ of records returned by multi_range_read_next()
+ 0 the record is to be left in the stream
*/
static
bool bka_range_seq_skip_record(range_seq_t rseq, range_id_t range_info, uchar *rowid)
{
- DBUG_ENTER("bka_range_seq_skip_record");
- JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
- bool res= cache->get_match_flag_by_pos((uchar *) range_info) ==
- JOIN_CACHE::MATCH_FOUND;
- DBUG_RETURN(res);
+DBUG_ENTER("bka_range_seq_skip_record");
+JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
+bool res= cache->get_match_flag_by_pos((uchar *) range_info) ==
+ JOIN_CACHE::MATCH_FOUND;
+DBUG_RETURN(res);
}
/*
- Check if the record combination from BKA cache matches the index condition
+Check if the record combination from BKA cache matches the index condition
- SYNOPSIS
- bka_skip_index_tuple()
- rseq value returned by bka_range_seq_init()
- range_info record chain for the next range/key returned by MRR
-
- DESCRIPTION
- This is wrapper for JOIN_CACHE_BKA::skip_index_tuple method,
- see comments there.
+SYNOPSIS
+ bka_skip_index_tuple()
+ rseq value returned by bka_range_seq_init()
+ range_info record chain for the next range/key returned by MRR
+
+DESCRIPTION
+ This is a wrapper for JOIN_CACHE_BKA::skip_index_tuple method,
+ see comments there.
- NOTE
- This function is used as a RANGE_SEQ_IF::skip_index_tuple callback.
-
- RETURN VALUE
- 0 The record combination satisfies the index condition
- 1 Otherwise
+NOTE
+ This function is used as a RANGE_SEQ_IF::skip_index_tuple callback.
+
+RETURN VALUE
+ 0 The record combination satisfies the index condition
+ 1 Otherwise
*/
static
bool bka_skip_index_tuple(range_seq_t rseq, range_id_t range_info)
{
- DBUG_ENTER("bka_skip_index_tuple");
- JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
- THD *thd= cache->thd();
- bool res;
- status_var_increment(thd->status_var.ha_icp_attempts);
- if (!(res= cache->skip_index_tuple(range_info)))
- status_var_increment(thd->status_var.ha_icp_match);
- DBUG_RETURN(res);
+DBUG_ENTER("bka_skip_index_tuple");
+JOIN_CACHE_BKA *cache= (JOIN_CACHE_BKA *) rseq;
+THD *thd= cache->thd();
+bool res;
+status_var_increment(thd->status_var.ha_icp_attempts);
+if (!(res= cache->skip_index_tuple(range_info)))
+ status_var_increment(thd->status_var.ha_icp_match);
+DBUG_RETURN(res);
}
/*
- Prepare to read the record from BKA cache matching the current joined record
-
- SYNOPSIS
- prepare_look_for_matches()
- skip_last <-> ignore the last record in the buffer (always unused here)
-
- DESCRIPTION
- The function prepares to iterate over records in the join cache buffer
- matching the record loaded into the record buffer for join_tab when
- performing join operation by BKA join algorithm. With BKA algorithms the
- record loaded into the record buffer for join_tab always has a direct
- reference to the matching records from the join buffer. When the regular
- BKA join algorithm is employed the record from join_tab can refer to
- only one such record.
- The function sets the counter of the remaining records from the cache
- buffer that would match the current join_tab record to 1.
-
- RETURN VALUE
- TRUE there are no records in the buffer to iterate over
- FALSE otherwise
+Prepare to read the record from BKA cache matching the current joined record
+
+SYNOPSIS
+ prepare_look_for_matches()
+ skip_last <-> ignore the last record in the buffer (always unused here)
+
+DESCRIPTION
+ The function prepares to iterate over records in the join cache buffer
+ matching the record loaded into the record buffer for join_tab when
+ performing join operation by BKA join algorithm. With BKA algorithms the
+ record loaded into the record buffer for join_tab always has a direct
+ reference to the matching records from the join buffer. When the regular
+ BKA join algorithm is employed the record from join_tab can refer to
+ only one such record.
+ The function sets the counter of the remaining records from the cache
+ buffer that would match the current join_tab record to 1.
+
+RETURN VALUE
+ TRUE there are no records in the buffer to iterate over
+ FALSE otherwise
*/
-
+
bool JOIN_CACHE_BKA::prepare_look_for_matches(bool skip_last)
{
- if (!records)
- return TRUE;
- rem_records= 1;
- return FALSE;
+if (!records)
+ return TRUE;
+rem_records= 1;
+return FALSE;
}
/*
- Get the record from the BKA cache matching the current joined record
-
- SYNOPSIS
- get_next_candidate_for_match
-
- DESCRIPTION
- This method is used for iterations over the records from the join
- cache buffer when looking for matches for records from join_tab.
- The method performs the necessary preparations to read the next record
- from the join buffer into the record buffer by the method
- read_next_candidate_for_match, or, to skip the next record from the join
- buffer by the method skip_if_not_needed_match.
- This implementation of the virtual method get_next_candidate_for_match
- just decrements the counter of the records that are to be iterated over
- and returns the value of curr_association as a reference to the position
- of the beginning of the record fields in the buffer.
-
- RETURN VALUE
- pointer to the start of the record fields in the join buffer
- if the there is another record to iterate over, 0 - otherwise.
+Get the record from the BKA cache matching the current joined record
+
+SYNOPSIS
+ get_next_candidate_for_match
+
+DESCRIPTION
+ This method is used for iterations over the records from the join
+ cache buffer when looking for matches for records from join_tab.
+ The method performs the necessary preparations to read the next record
+ from the join buffer into the record buffer by the method
+ read_next_candidate_for_match, or, to skip the next record from the join
+ buffer by the method skip_if_not_needed_match.
+ This implementation of the virtual method get_next_candidate_for_match
+ just decrements the counter of the records that are to be iterated over
+ and returns the value of curr_association as a reference to the position
+ of the beginning of the record fields in the buffer.
+
+RETURN VALUE
+ pointer to the start of the record fields in the join buffer
+ if there is another record to iterate over, 0 - otherwise.
*/
uchar *JOIN_CACHE_BKA::get_next_candidate_for_match()
{
- if (!rem_records)
- return 0;
- rem_records--;
- return curr_association;
+if (!rem_records)
+ return 0;
+rem_records--;
+return curr_association;
}
/*
- Check whether the matching record from the BKA cache is to be skipped
-
- SYNOPSIS
- skip_next_candidate_for_match
- rec_ptr pointer to the position in the join buffer right after
- the previous record
-
- DESCRIPTION
- This implementation of the virtual function just calls the
- method get_match_flag_by_pos to check whether the record referenced
- by ref_ptr has its match flag set to MATCH_FOUND.
-
- RETURN VALUE
- TRUE the record referenced by rec_ptr has its match flag set to
- MATCH_FOUND
- FALSE otherwise
+Check whether the matching record from the BKA cache is to be skipped
+
+SYNOPSIS
+ skip_next_candidate_for_match
+ rec_ptr pointer to the position in the join buffer right after
+ the previous record
+
+DESCRIPTION
+ This implementation of the virtual function just calls the
+ method get_match_flag_by_pos to check whether the record referenced
+ by ref_ptr has its match flag set to MATCH_FOUND.
+
+RETURN VALUE
+ TRUE the record referenced by rec_ptr has its match flag set to
+ MATCH_FOUND
+ FALSE otherwise
*/
bool JOIN_CACHE_BKA::skip_next_candidate_for_match(uchar *rec_ptr)
{
- return join_tab->check_only_first_match() &&
- (get_match_flag_by_pos(rec_ptr) == MATCH_FOUND);
+return join_tab->check_only_first_match() &&
+ (get_match_flag_by_pos(rec_ptr) == MATCH_FOUND);
}
/*
- Read the next record from the BKA join cache buffer when looking for matches
-
- SYNOPSIS
- read_next_candidate_for_match
- rec_ptr pointer to the position in the join buffer right after
- the previous record
-
- DESCRIPTION
- This implementation of the virtual method read_next_candidate_for_match
- calls the method get_record_by_pos to read the record referenced by rec_ptr
- from the join buffer into the record buffer. If this record refers to
- fields in the other join buffers the call of get_record_by_po ensures that
- these fields are read into the corresponding record buffers as well.
- This function is supposed to be called after a successful call of
- the method get_next_candidate_for_match.
-
- RETURN VALUE
- none
+Read the next record from the BKA join cache buffer when looking for matches
+
+SYNOPSIS
+ read_next_candidate_for_match
+ rec_ptr pointer to the position in the join buffer right after
+ the previous record
+
+DESCRIPTION
+ This implementation of the virtual method read_next_candidate_for_match
+ calls the method get_record_by_pos to read the record referenced by rec_ptr
+ from the join buffer into the record buffer. If this record refers to
+ fields in the other join buffers, the call of get_record_by_pos ensures that
+ these fields are read into the corresponding record buffers as well.
+ This function is supposed to be called after a successful call of
+ the method get_next_candidate_for_match.
+
+RETURN VALUE
+ none
*/
void JOIN_CACHE_BKA::read_next_candidate_for_match(uchar *rec_ptr)
{
- get_record_by_pos(rec_ptr);
+get_record_by_pos(rec_ptr);
}
/*
- Initialize the BKA join cache
+Initialize the BKA join cache
- SYNOPSIS
- init
+SYNOPSIS
+ init
- DESCRIPTION
- The function initializes the cache structure. It is supposed to be called
- right after a constructor for the JOIN_CACHE_BKA.
+DESCRIPTION
+ The function initializes the cache structure. It is supposed to be called
+ right after a constructor for the JOIN_CACHE_BKA.
- NOTES
- The function first constructs a companion object of the type
- JOIN_TAB_SCAN_MRR, then it calls the init method of the parent class.
-
- RETURN VALUE
- 0 initialization with buffer allocations has been succeeded
- 1 otherwise
+NOTES
+ The function first constructs a companion object of the type
+ JOIN_TAB_SCAN_MRR, then it calls the init method of the parent class.
+
+RETURN VALUE
+ 0 initialization with buffer allocations has succeeded
+ 1 otherwise
*/
int JOIN_CACHE_BKA::init()
{
- int res;
- bool check_only_first_match= join_tab->check_only_first_match();
+int res;
+bool check_only_first_match= join_tab->check_only_first_match();
- RANGE_SEQ_IF rs_funcs= { bka_range_seq_key_info,
- bka_range_seq_init,
- bka_range_seq_next,
- check_only_first_match ?
- bka_range_seq_skip_record : 0,
- bka_skip_index_tuple };
+RANGE_SEQ_IF rs_funcs= { bka_range_seq_key_info,
+ bka_range_seq_init,
+ bka_range_seq_next,
+ check_only_first_match ?
+ bka_range_seq_skip_record : 0,
+ bka_skip_index_tuple };
- DBUG_ENTER("JOIN_CACHE_BKA::init");
+DBUG_ENTER("JOIN_CACHE_BKA::init");
- JOIN_TAB_SCAN_MRR *jsm;
- if (!(join_tab_scan= jsm= new JOIN_TAB_SCAN_MRR(join, join_tab,
- mrr_mode, rs_funcs)))
- DBUG_RETURN(1);
+JOIN_TAB_SCAN_MRR *jsm;
+if (!(join_tab_scan= jsm= new JOIN_TAB_SCAN_MRR(join, join_tab,
+ mrr_mode, rs_funcs)))
+ DBUG_RETURN(1);
- if ((res= JOIN_CACHE::init()))
- DBUG_RETURN(res);
+if ((res= JOIN_CACHE::init()))
+ DBUG_RETURN(res);
- if (use_emb_key)
- jsm->mrr_mode |= HA_MRR_MATERIALIZED_KEYS;
+if (use_emb_key)
+ jsm->mrr_mode |= HA_MRR_MATERIALIZED_KEYS;
- DBUG_RETURN(0);
+DBUG_RETURN(0);
}
/*
- Get the key built over the next record from BKA join buffer
-
- SYNOPSIS
- get_next_key()
- key pointer to the buffer where the key value is to be placed
-
- DESCRIPTION
- The function reads key fields from the current record in the join buffer.
- and builds the key value out of these fields that will be used to access
- the 'join_tab' table. Some of key fields may belong to previous caches.
- They are accessed via record references to the record parts stored in the
- previous join buffers. The other key fields always are placed right after
- the flag fields of the record.
- If the key is embedded, which means that its value can be read directly
- from the join buffer, then *key is set to the beginning of the key in
- this buffer. Otherwise the key is built in the join_tab->ref->key_buff.
- The function returns the length of the key if it succeeds ro read it.
- If is assumed that the functions starts reading at the position of
- the record length which is provided for each records in a BKA cache.
- After the key is built the 'pos' value points to the first position after
- the current record.
- The function just skips the records with MATCH_IMPOSSIBLE in the
- match flag field if there is any.
- The function returns 0 if the initial position is after the beginning
- of the record fields for last record from the join buffer.
-
- RETURN VALUE
- length of the key value - if the starting value of 'pos' points to
- the position before the fields for the last record,
- 0 - otherwise.
+Get the key built over the next record from BKA join buffer
+
+SYNOPSIS
+ get_next_key()
+ key pointer to the buffer where the key value is to be placed
+
+DESCRIPTION
+ The function reads key fields from the current record in the join buffer
+ and builds the key value out of these fields that will be used to access
+ the 'join_tab' table. Some of key fields may belong to previous caches.
+ They are accessed via record references to the record parts stored in the
+ previous join buffers. The other key fields always are placed right after
+ the flag fields of the record.
+ If the key is embedded, which means that its value can be read directly
+ from the join buffer, then *key is set to the beginning of the key in
+ this buffer. Otherwise the key is built in the join_tab->ref->key_buff.
+ The function returns the length of the key if it succeeds in reading it.
+ It is assumed that the function starts reading at the position of
+ the record length, which is provided for each record in a BKA cache.
+ After the key is built the 'pos' value points to the first position after
+ the current record.
+ The function just skips the records with MATCH_IMPOSSIBLE in the
+ match flag field if there is any.
+ The function returns 0 if the initial position is after the beginning
+ of the record fields for the last record from the join buffer.
+
+RETURN VALUE
+ length of the key value - if the starting value of 'pos' points to
+ the position before the fields for the last record,
+ 0 - otherwise.
*/
uint JOIN_CACHE_BKA::get_next_key(uchar ** key)
{
- uint len;
- uint32 rec_len;
- uchar *init_pos;
- JOIN_CACHE *cache;
-
+uint len;
+uint32 rec_len;
+uchar *init_pos;
+JOIN_CACHE *cache;
+
start:
- /* Any record in a BKA cache is prepended with its length */
- DBUG_ASSERT(with_length);
-
- if ((pos+size_of_rec_len) > last_rec_pos || !records)
- return 0;
+/* Any record in a BKA cache is prepended with its length */
+DBUG_ASSERT(with_length);
+
+if ((pos+size_of_rec_len) > last_rec_pos || !records)
+ return 0;
- /* Read the length of the record */
- rec_len= get_rec_length(pos);
- pos+= size_of_rec_len;
- init_pos= pos;
+/* Read the length of the record */
+rec_len= get_rec_length(pos);
+pos+= size_of_rec_len;
+init_pos= pos;
- /* Read a reference to the previous cache if any */
- if (prev_cache)
- pos+= prev_cache->get_size_of_rec_offset();
+/* Read a reference to the previous cache if any */
+if (prev_cache)
+ pos+= prev_cache->get_size_of_rec_offset();
- curr_rec_pos= pos;
+curr_rec_pos= pos;
- /* Read all flag fields of the record */
- read_flag_fields();
+/* Read all flag fields of the record */
+read_flag_fields();
- if (with_match_flag &&
- (Match_flag) curr_rec_pos[0] == MATCH_IMPOSSIBLE )
- {
- pos= init_pos+rec_len;
- goto start;
- }
-
- if (use_emb_key)
- {
- /* An embedded key is taken directly from the join buffer */
- *key= pos;
- len= emb_key_length;
- }
- else
+if (with_match_flag &&
+ (Match_flag) curr_rec_pos[0] == MATCH_IMPOSSIBLE )
+{
+ pos= init_pos+rec_len;
+ goto start;
+}
+
+if (use_emb_key)
+{
+ /* An embedded key is taken directly from the join buffer */
+ *key= pos;
+ len= emb_key_length;
+}
+else
+{
+ /* Read key arguments from previous caches if there are any such fields */
+ if (external_key_arg_fields)
{
- /* Read key arguments from previous caches if there are any such fields */
- if (external_key_arg_fields)
- {
- uchar *rec_ptr= curr_rec_pos;
- uint key_arg_count= external_key_arg_fields;
- CACHE_FIELD **copy_ptr= blob_ptr-key_arg_count;
- for (cache= prev_cache; key_arg_count; cache= cache->prev_cache)
- {
- uint len= 0;
+ uchar *rec_ptr= curr_rec_pos;
+ uint key_arg_count= external_key_arg_fields;
+ CACHE_FIELD **copy_ptr= blob_ptr-key_arg_count;
+ for (cache= prev_cache; key_arg_count; cache= cache->prev_cache)
+ {
+ uint len= 0;
+ DBUG_ASSERT(cache);
+ rec_ptr= cache->get_rec_ref(rec_ptr);
+ while (!cache->referenced_fields)
+ {
+ cache= cache->prev_cache;
DBUG_ASSERT(cache);
rec_ptr= cache->get_rec_ref(rec_ptr);
- while (!cache->referenced_fields)
- {
- cache= cache->prev_cache;
- DBUG_ASSERT(cache);
- rec_ptr= cache->get_rec_ref(rec_ptr);
- }
- while (key_arg_count &&
- cache->read_referenced_field(*copy_ptr, rec_ptr, &len))
- {
- copy_ptr++;
- --key_arg_count;
- }
+ }
+ while (key_arg_count &&
+ cache->read_referenced_field(*copy_ptr, rec_ptr, &len))
+ {
+ copy_ptr++;
+ --key_arg_count;
}
}
-
- /*
- Read the other key arguments from the current record. The fields for
- these arguments are always first in the sequence of the record's fields.
- */
- CACHE_FIELD *copy= field_descr+flag_fields;
- CACHE_FIELD *copy_end= copy+local_key_arg_fields;
- bool blob_in_rec_buff= blob_data_is_in_rec_buff(curr_rec_pos);
- for ( ; copy < copy_end; copy++)
- read_record_field(copy, blob_in_rec_buff);
-
- /* Build the key over the fields read into the record buffers */
- TABLE_REF *ref= &join_tab->ref;
- cp_buffer_from_ref(join->thd, join_tab->table, ref);
- *key= ref->key_buff;
- len= ref->key_length;
}
+
+ /*
+ Read the other key arguments from the current record. The fields for
+ these arguments are always first in the sequence of the record's fields.
+ */
+ CACHE_FIELD *copy= field_descr+flag_fields;
+ CACHE_FIELD *copy_end= copy+local_key_arg_fields;
+ bool blob_in_rec_buff= blob_data_is_in_rec_buff(curr_rec_pos);
+ for ( ; copy < copy_end; copy++)
+ read_record_field(copy, blob_in_rec_buff);
+
+ /* Build the key over the fields read into the record buffers */
+ TABLE_REF *ref= &join_tab->ref;
+ cp_buffer_from_ref(join->thd, join_tab->table, ref);
+ *key= ref->key_buff;
+ len= ref->key_length;
+}
- pos= init_pos+rec_len;
+pos= init_pos+rec_len;
- return len;
+return len;
}
/*
- Check the index condition of the joined table for a record from the BKA cache
-
- SYNOPSIS
- skip_index_tuple()
- range_info pointer to the record returned by MRR
-
- DESCRIPTION
- This function is invoked from MRR implementation to check if an index
- tuple matches the index condition. It is used in the case where the index
- condition actually depends on both columns of the used index and columns
- from previous tables.
-
- NOTES
- Accessing columns of the previous tables requires special handling with
- BKA. The idea of BKA is to collect record combinations in a buffer and
- then do a batch of ref access lookups, i.e. by the time we're doing a
- lookup its previous-records-combination is not in prev_table->record[0]
- but somewhere in the join buffer.
- We need to get it from there back into prev_table(s)->record[0] before we
- can evaluate the index condition, and that's why we need this function
- instead of regular IndexConditionPushdown.
-
- NOTES
- Possible optimization:
- Before we unpack the record from a previous table
- check if this table is used in the condition.
- If so then unpack the record otherwise skip the unpacking.
- This should be done by a special virtual method
- get_partial_record_by_pos().
+Check the index condition of the joined table for a record from the BKA cache
- RETURN VALUE
- 1 the record combination does not satisfies the index condition
- 0 otherwise
+SYNOPSIS
+ skip_index_tuple()
+ range_info pointer to the record returned by MRR
+
+DESCRIPTION
+ This function is invoked from MRR implementation to check if an index
+ tuple matches the index condition. It is used in the case where the index
+ condition actually depends on both columns of the used index and columns
+ from previous tables.
+
+NOTES
+ Accessing columns of the previous tables requires special handling with
+ BKA. The idea of BKA is to collect record combinations in a buffer and
+ then do a batch of ref access lookups, i.e. by the time we're doing a
+ lookup its previous-records-combination is not in prev_table->record[0]
+ but somewhere in the join buffer.
+ We need to get it from there back into prev_table(s)->record[0] before we
+ can evaluate the index condition, and that's why we need this function
+ instead of regular IndexConditionPushdown.
+
+NOTES
+ Possible optimization:
+ Before we unpack the record from a previous table
+ check if this table is used in the condition.
+ If so then unpack the record otherwise skip the unpacking.
+ This should be done by a special virtual method
+ get_partial_record_by_pos().
+
+RETURN VALUE
+ 1 the record combination does not satisfy the index condition
+ 0 otherwise
*/
bool JOIN_CACHE_BKA::skip_index_tuple(range_id_t range_info)
{
- DBUG_ENTER("JOIN_CACHE_BKA::skip_index_tuple");
- get_record_by_pos((uchar*)range_info);
- DBUG_RETURN(!join_tab->cache_idx_cond->val_int());
+DBUG_ENTER("JOIN_CACHE_BKA::skip_index_tuple");
+get_record_by_pos((uchar*)range_info);
+DBUG_RETURN(!join_tab->cache_idx_cond->val_int());
}
/*
- Initialize retrieval of range sequence for the BKAH join algorithm
-
- SYNOPSIS
- bkah_range_seq_init()
- init_params pointer to the BKAH join cache object
- n_ranges the number of ranges obtained
- flags combination of MRR flags
-
- DESCRIPTION
- The function interprets init_param as a pointer to a JOIN_CACHE_BKAH
- object. The function prepares for an iteration over distinct join keys
- built over the records from the cache join buffer.
-
- NOTE
- This function are used only as a callback function.
-
- RETURN VALUE
- init_param value that is to be used as a parameter of
- bkah_range_seq_next()
+Initialize retrieval of range sequence for the BKAH join algorithm
+
+SYNOPSIS
+ bkah_range_seq_init()
+ init_params pointer to the BKAH join cache object
+ n_ranges the number of ranges obtained
+ flags combination of MRR flags
+
+DESCRIPTION
+ The function interprets init_param as a pointer to a JOIN_CACHE_BKAH
+ object. The function prepares for an iteration over distinct join keys
+ built over the records from the cache join buffer.
+
+NOTE
+ This function is used only as a callback function.
+
+RETURN VALUE
+ init_param value that is to be used as a parameter of
+ bkah_range_seq_next()
*/
static
range_seq_t bkah_range_seq_init(void *init_param, uint n_ranges, uint flags)
{
- DBUG_ENTER("bkah_range_seq_init");
- JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) init_param;
- cache->reset(0);
- DBUG_RETURN((range_seq_t) init_param);
+DBUG_ENTER("bkah_range_seq_init");
+JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) init_param;
+cache->reset(0);
+DBUG_RETURN((range_seq_t) init_param);
}
/*
- Get the next range/key over records from the join buffer of a BKAH cache
-
- SYNOPSIS
- bkah_range_seq_next()
- seq value returned by bkah_range_seq_init()
- range OUT reference to the next range
+Get the next range/key over records from the join buffer of a BKAH cache
- DESCRIPTION
- The function interprets seq as a pointer to a JOIN_CACHE_BKAH
- object. The function returns a pointer to the range descriptor
- for the next unique key built over records from the join buffer.
-
- NOTE
- This function are used only as a callback function.
-
- RETURN VALUE
- FALSE ok, the range structure filled with info about the next range/key
- TRUE no more ranges
+SYNOPSIS
+ bkah_range_seq_next()
+ seq value returned by bkah_range_seq_init()
+ range OUT reference to the next range
+
+DESCRIPTION
+ The function interprets seq as a pointer to a JOIN_CACHE_BKAH
+ object. The function returns a pointer to the range descriptor
+ for the next unique key built over records from the join buffer.
+
+NOTE
+ This function is used only as a callback function.
+
+RETURN VALUE
+ FALSE ok, the range structure filled with info about the next range/key
+ TRUE no more ranges
*/
static
bool bkah_range_seq_next(range_seq_t rseq, KEY_MULTI_RANGE *range)
{
- DBUG_ENTER("bkah_range_seq_next");
- JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) rseq;
- TABLE_REF *ref= &cache->join_tab->ref;
- key_range *start_key= &range->start_key;
- if ((start_key->length= cache->get_next_key((uchar **) &start_key->key)))
- {
- start_key->keypart_map= (1 << ref->key_parts) - 1;
- start_key->flag= HA_READ_KEY_EXACT;
- range->end_key= *start_key;
- range->end_key.flag= HA_READ_AFTER_KEY;
- range->ptr= (char *) cache->get_curr_key_chain();
- range->range_flag= EQ_RANGE;
- DBUG_RETURN(0);
- }
- DBUG_RETURN(1);
+DBUG_ENTER("bkah_range_seq_next");
+JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) rseq;
+TABLE_REF *ref= &cache->join_tab->ref;
+key_range *start_key= &range->start_key;
+if ((start_key->length= cache->get_next_key((uchar **) &start_key->key)))
+{
+ start_key->keypart_map= (1 << ref->key_parts) - 1;
+ start_key->flag= HA_READ_KEY_EXACT;
+ range->end_key= *start_key;
+ range->end_key.flag= HA_READ_AFTER_KEY;
+ range->ptr= (char *) cache->get_curr_key_chain();
+ range->range_flag= EQ_RANGE;
+ DBUG_RETURN(0);
+}
+DBUG_RETURN(1);
}
/*
- Check whether range_info orders to skip the next record from BKAH join buffer
+Check whether range_info orders to skip the next record from BKAH join buffer
- SYNOPSIS
- bkah_range_seq_skip_record()
- seq value returned by bkah_range_seq_init()
- range_info information about the next range/key returned by MRR
- rowid [NOT USED] rowid of the record to be checked (not used)
-
- DESCRIPTION
- The function interprets seq as a pointer to a JOIN_CACHE_BKAH
- object. The function returns TRUE if the record with this range_info
- is to be filtered out from the stream of records returned by
- multi_range_read_next().
-
- NOTE
- This function are used only as a callback function.
-
- RETURN VALUE
- 1 record with this range_info is to be filtered out from the stream
- of records returned by multi_range_read_next()
- 0 the record is to be left in the stream
+SYNOPSIS
+ bkah_range_seq_skip_record()
+ seq value returned by bkah_range_seq_init()
+ range_info information about the next range/key returned by MRR
+ rowid [NOT USED] rowid of the record to be checked (not used)
+
+DESCRIPTION
+ The function interprets seq as a pointer to a JOIN_CACHE_BKAH
+ object. The function returns TRUE if the record with this range_info
+ is to be filtered out from the stream of records returned by
+ multi_range_read_next().
+
+NOTE
+ This function is used only as a callback function.
+
+RETURN VALUE
+ 1 record with this range_info is to be filtered out from the stream
+ of records returned by multi_range_read_next()
+ 0 the record is to be left in the stream
*/
static
bool bkah_range_seq_skip_record(range_seq_t rseq, range_id_t range_info,
- uchar *rowid)
+ uchar *rowid)
{
- DBUG_ENTER("bkah_range_seq_skip_record");
- JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) rseq;
- bool res= cache->check_all_match_flags_for_key((uchar *) range_info);
- DBUG_RETURN(res);
+DBUG_ENTER("bkah_range_seq_skip_record");
+JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) rseq;
+bool res= cache->check_all_match_flags_for_key((uchar *) range_info);
+DBUG_RETURN(res);
}
-
+
/*
- Check if the record combination from BKAH cache matches the index condition
+Check if the record combination from BKAH cache matches the index condition
- SYNOPSIS
- bkah_skip_index_tuple()
- rseq value returned by bka_range_seq_init()
- range_info record chain for the next range/key returned by MRR
-
- DESCRIPTION
- This is wrapper for JOIN_CACHE_BKA_UNIQUE::skip_index_tuple method,
- see comments there.
+SYNOPSIS
+ bkah_skip_index_tuple()
+ rseq value returned by bka_range_seq_init()
+ range_info record chain for the next range/key returned by MRR
+
+DESCRIPTION
+ This is a wrapper for JOIN_CACHE_BKA_UNIQUE::skip_index_tuple method,
+ see comments there.
- NOTE
- This function is used as a RANGE_SEQ_IF::skip_index_tuple callback.
-
- RETURN VALUE
- 0 some records from the chain satisfy the index condition
- 1 otherwise
+NOTE
+ This function is used as a RANGE_SEQ_IF::skip_index_tuple callback.
+
+RETURN VALUE
+ 0 some records from the chain satisfy the index condition
+ 1 otherwise
*/
static
bool bkah_skip_index_tuple(range_seq_t rseq, range_id_t range_info)
{
- DBUG_ENTER("bka_unique_skip_index_tuple");
- JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) rseq;
- THD *thd= cache->thd();
- bool res;
- status_var_increment(thd->status_var.ha_icp_attempts);
- if (!(res= cache->skip_index_tuple(range_info)))
- status_var_increment(thd->status_var.ha_icp_match);
- DBUG_RETURN(res);
+DBUG_ENTER("bka_unique_skip_index_tuple");
+JOIN_CACHE_BKAH *cache= (JOIN_CACHE_BKAH *) rseq;
+THD *thd= cache->thd();
+bool res;
+status_var_increment(thd->status_var.ha_icp_attempts);
+if (!(res= cache->skip_index_tuple(range_info)))
+ status_var_increment(thd->status_var.ha_icp_match);
+DBUG_RETURN(res);
}
/*
- Prepare to read record from BKAH cache matching the current joined record
-
- SYNOPSIS
- prepare_look_for_matches()
- skip_last <-> ignore the last record in the buffer (always unused here)
-
- DESCRIPTION
- The function prepares to iterate over records in the join cache buffer
- matching the record loaded into the record buffer for join_tab when
- performing join operation by BKAH join algorithm. With BKAH algorithm, if
- association labels are used, then record loaded into the record buffer
- for join_tab always has a direct reference to the chain of the mathing
- records from the join buffer. If association labels are not used then
- then the chain of the matching records is obtained by the call of the
- get_key_chain_by_join_key function.
-
- RETURN VALUE
- TRUE there are no records in the buffer to iterate over
- FALSE otherwise
+Prepare to read record from BKAH cache matching the current joined record
+
+SYNOPSIS
+ prepare_look_for_matches()
+ skip_last <-> ignore the last record in the buffer (always unused here)
+
+DESCRIPTION
+ The function prepares to iterate over records in the join cache buffer
+ matching the record loaded into the record buffer for join_tab when
+ performing join operation by BKAH join algorithm. With BKAH algorithm, if
+ association labels are used, then the record loaded into the record buffer
+ for join_tab always has a direct reference to the chain of the matching
+ records from the join buffer. If association labels are not used,
+ then the chain of the matching records is obtained by the call of the
+ get_key_chain_by_join_key function.
+
+RETURN VALUE
+ TRUE there are no records in the buffer to iterate over
+ FALSE otherwise
*/
-
+
bool JOIN_CACHE_BKAH::prepare_look_for_matches(bool skip_last)
{
- last_matching_rec_ref_ptr= next_matching_rec_ref_ptr= 0;
- if (no_association &&
- !(curr_matching_chain= get_matching_chain_by_join_key()))
+last_matching_rec_ref_ptr= next_matching_rec_ref_ptr= 0;
+if (no_association &&
+ !(curr_matching_chain= get_matching_chain_by_join_key())) //psergey: added '!'
return 1;
last_matching_rec_ref_ptr= get_next_rec_ref(curr_matching_chain);
return 0;
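Most of this file's diff is a re-indentation of the BKA/BKAH helpers; the one algorithmic piece worth illustrating is get_next_key()'s cursor walk over the join buffer, where every record is stored with a length prefix. A self-contained sketch of that record layout and walk, with toy payloads rather than the real key/flag encoding:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Model of the buffer walk: each record is prepended with its length, so
// the reader loads the length, handles the payload, and jumps ahead by
// that many bytes to the next record.
int main()
{
  unsigned char buf[64];
  unsigned char *pos = buf;
  const char *payloads[] = { "k1", "key22", "x" };
  for (int i = 0; i < 3; i++)              // build length-prefixed records
  {
    uint32_t len = (uint32_t) strlen(payloads[i]);
    memcpy(pos, &len, sizeof(len));
    memcpy(pos + sizeof(len), payloads[i], len);
    pos += sizeof(len) + len;
  }
  const unsigned char *end = pos;
  for (pos = buf; pos + sizeof(uint32_t) <= end; )
  {
    uint32_t len;
    memcpy(&len, pos, sizeof(len));        // read the record length
    pos += sizeof(len);
    printf("%.*s\n", (int) len, (const char *) pos);
    pos += len;                            // advance to the next record
  }
  return 0;
}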
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index b3ee0636bd7..b8ce3b6720e 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -459,6 +459,9 @@ void lex_start(THD *thd)
lex->set_var_list.empty();
lex->param_list.empty();
lex->view_list.empty();
+ lex->with_persistent_for_clause= FALSE;
+ lex->column_list= NULL;
+ lex->index_list= NULL;
lex->prepared_stmt_params.empty();
lex->auxiliary_table_list.empty();
lex->unit.next= lex->unit.master=
@@ -504,6 +507,7 @@ void lex_start(THD *thd)
lex->expr_allows_subselect= TRUE;
lex->use_only_table_context= FALSE;
lex->parse_vcol_expr= FALSE;
+ lex->verbose= 0;
lex->name.str= 0;
lex->name.length= 0;
@@ -1526,10 +1530,11 @@ int lex_one_token(void *arg, void *yythd)
/*
The special comment format is very strict:
'/' '*' '!', followed by an optional 'M' and exactly
- 1 digit (major), 2 digits (minor), then 2 digits (dot).
- 32302 -> 3.23.02
- 50032 -> 5.0.32
- 50114 -> 5.1.14
+ 1-2 digits (major), 2 digits (minor), then 2 digits (dot).
+ 32302 -> 3.23.02
+ 50032 -> 5.0.32
+ 50114 -> 5.1.14
+ 100000 -> 10.0.0
*/
if ( my_isdigit(cs, lip->yyPeekn(0))
&& my_isdigit(cs, lip->yyPeekn(1))
@@ -1539,14 +1544,21 @@ int lex_one_token(void *arg, void *yythd)
)
{
ulong version;
- char *end_ptr= (char*) lip->get_ptr()+5;
+ uint length= 5;
+ char *end_ptr= (char*) lip->get_ptr()+length;
int error;
+ if (my_isdigit(cs, lip->yyPeekn(5)))
+ {
+ end_ptr++; // 6 digit number
+ length++;
+ }
+
version= (ulong) my_strtoll10(lip->get_ptr(), &end_ptr, &error);
if (version <= MYSQL_VERSION_ID)
{
/* Accept 'M' 'm' 'm' 'd' 'd' */
- lip->yySkipn(5);
+ lip->yySkipn(length);
/* Expand the content of the special comment as real code */
lip->set_echo(TRUE);
state=MY_LEX_START;
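The hunk extends the executable-comment scanner from exactly five digits to five or six, which is what lets MariaDB 10.x be addressed by /*!100000 ... */ comments. A standalone sketch of the widened parse, independent of the Lex_input_stream API:

#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Model of the change: after "/*!", accept a 5-digit (Mmmdd) or, new
// here, a 6-digit (MMmmdd) version number.
static long parse_version(const char *p)
{
  for (int i = 0; i < 5; i++)
    if (!isdigit((unsigned char) p[i]))
      return -1;                           // not a version comment
  int length = isdigit((unsigned char) p[5]) ? 6 : 5;
  char buf[8];
  memcpy(buf, p, length);
  buf[length] = '\0';
  return strtol(buf, NULL, 10);
}

int main()
{
  printf("%ld\n", parse_version("50114 SELECT 1"));   // 50114  -> 5.1.14
  printf("%ld\n", parse_version("100000 SELECT 1"));  // 100000 -> 10.0.0
  return 0;
}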
@@ -1876,6 +1888,7 @@ void st_select_lex::init_query()
nest_level= 0;
link_next= 0;
is_prep_leaf_list_saved= FALSE;
+ have_merged_subqueries= FALSE;
bzero((char*) expr_cache_may_be_used, sizeof(expr_cache_may_be_used));
m_non_agg_field_used= false;
m_agg_func_used= false;
@@ -2541,7 +2554,7 @@ LEX::LEX()
my_init_dynamic_array2(&plugins, sizeof(plugin_ref),
plugins_static_buffer,
INITIAL_LEX_PLUGIN_LIST_SIZE,
- INITIAL_LEX_PLUGIN_LIST_SIZE);
+ INITIAL_LEX_PLUGIN_LIST_SIZE, 0);
reset_query_tables_list(TRUE);
mi.init();
}
@@ -3467,7 +3480,7 @@ bool st_select_lex::optimize_unflattened_subqueries(bool const_only)
if (options & SELECT_DESCRIBE)
{
/* Optimize the subquery in the context of EXPLAIN. */
- sl->set_explain_type();
+ sl->set_explain_type(FALSE);
sl->options|= SELECT_DESCRIBE;
inner_join->select_options|= SELECT_DESCRIBE;
}
@@ -3917,9 +3930,12 @@ void st_select_lex::update_correlated_cache()
/**
Set the EXPLAIN type for this subquery.
+
+ @param on_the_fly TRUE<=> We're running a SHOW EXPLAIN command, so we must
+ not change any variables
*/
-void st_select_lex::set_explain_type()
+void st_select_lex::set_explain_type(bool on_the_fly)
{
bool is_primary= FALSE;
if (next_select())
@@ -3941,6 +3957,9 @@ void st_select_lex::set_explain_type()
}
}
+ if (on_the_fly && !is_primary && have_merged_subqueries)
+ is_primary= TRUE;
+
SELECT_LEX *first= master_unit()->first_select();
/* drop UNCACHEABLE_EXPLAIN, because it is for internal usage only */
uint8 is_uncacheable= (uncacheable & ~UNCACHEABLE_EXPLAIN);
@@ -3993,10 +4012,15 @@ void st_select_lex::set_explain_type()
else
{
type= is_uncacheable ? "UNCACHEABLE UNION": "UNION";
+ if (this == master_unit()->fake_select_lex)
+ type= "UNION RESULT";
+
}
}
}
- options|= SELECT_DESCRIBE;
+
+ if (!on_the_fly)
+ options|= SELECT_DESCRIBE;
}
@@ -4143,6 +4167,116 @@ bool st_select_lex::is_merged_child_of(st_select_lex *ancestor)
}
+int print_explain_message_line(select_result_sink *result,
+ SELECT_LEX *select_lex,
+ bool on_the_fly,
+ uint8 options,
+ const char *message);
+
+
+int st_select_lex::print_explain(select_result_sink *output,
+ uint8 explain_flags,
+ bool *printed_anything)
+{
+ int res;
+ if (join && join->have_query_plan == JOIN::QEP_AVAILABLE)
+ {
+ /*
+ There are a number of reasons a join can be marked as degenerate, so all
+ three conditions below can happen simultaneously, or individually:
+ */
+ *printed_anything= TRUE;
+ if (!join->table_count || !join->tables_list || join->zero_result_cause)
+ {
+ /* It's a degenerate join */
+ const char *cause= join->zero_result_cause ? join->zero_result_cause :
+ "No tables used";
+ res= join->print_explain(output, explain_flags, TRUE, FALSE, FALSE,
+ FALSE, cause);
+ }
+ else
+ {
+ res= join->print_explain(output, explain_flags, TRUE,
+ join->need_tmp, // need_tmp_table
+ !join->skip_sort_order && !join->no_order &&
+ (join->order || join->group_list), // bool need_order
+ join->select_distinct, // bool distinct
+ NULL); //const char *message
+ }
+ if (res)
+ goto err;
+
+ for (SELECT_LEX_UNIT *unit= join->select_lex->first_inner_unit();
+ unit;
+ unit= unit->next_unit())
+ {
+ /*
+ Display subqueries only if they are not parts of eliminated WHERE/ON
+ clauses.
+ */
+ if (!(unit->item && unit->item->eliminated))
+ {
+ if ((res= unit->print_explain(output, explain_flags, printed_anything)))
+ goto err;
+ }
+ }
+ }
+ else
+ {
+ const char *msg;
+ if (!join)
+ DBUG_ASSERT(0); /* Seems not to be possible */
+
+ /* Not printing anything useful, don't touch *printed_anything here */
+ if (join->have_query_plan == JOIN::QEP_NOT_PRESENT_YET)
+ msg= "Not yet optimized";
+ else
+ {
+ DBUG_ASSERT(join->have_query_plan == JOIN::QEP_DELETED);
+ msg= "Query plan already deleted";
+ }
+ res= print_explain_message_line(output, this, TRUE /* on_the_fly */,
+ 0, msg);
+ }
+err:
+ return res;
+}
+
+
+int st_select_lex_unit::print_explain(select_result_sink *output,
+ uint8 explain_flags, bool *printed_anything)
+{
+ int res= 0;
+ SELECT_LEX *first= first_select();
+
+ if (first && !first->next_select() && !first->join)
+ {
+ /*
+ If there is only one child, 'first', and it has join==NULL, emit a
+ "Query plan already deleted" message instead of any plan rows.
+ */
+ const char *msg="Query plan already deleted";
+ res= print_explain_message_line(output, first, TRUE /* on_the_fly */,
+ 0, msg);
+ return 0;
+ }
+
+ for (SELECT_LEX *sl= first; sl; sl= sl->next_select())
+ {
+ if ((res= sl->print_explain(output, explain_flags, printed_anything)))
+ break;
+ }
+
+ /* Note: fake_select_lex->join may be NULL or non-NULL at this point */
+ if (fake_select_lex)
+ {
+ res= print_fake_select_lex_join(output, TRUE /* on the fly */,
+ fake_select_lex, explain_flags);
+ }
+ return res;
+}
+
+
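
st_select_lex::print_explain() and st_select_lex_unit::print_explain() above
form a mutual recursion over the select/unit tree: a unit prints each child
select, and each select prints its own plan row(s) and then descends into its
non-eliminated inner units. A stripped-down, self-contained sketch of that
traversal shape (hypothetical types, no real plan data):

    #include <cstdio>
    #include <vector>

    // Hypothetical model of the print_explain() recursion: units print
    // their child selects, selects recurse into their inner units.
    struct Unit;

    struct Select {
      int number;
      bool has_plan;                 // JOIN::QEP_AVAILABLE in the server
      std::vector<Unit*> inner;      // subqueries of this select
      int print_explain(bool *printed_anything);
    };

    struct Unit {
      std::vector<Select*> selects;
      int print_explain(bool *printed_anything) {
        for (Select *sl : selects)
          if (int res = sl->print_explain(printed_anything))
            return res;
        return 0;
      }
    };

    int Select::print_explain(bool *printed_anything) {
      if (!has_plan) {
        std::printf("select #%d: Not yet optimized\n", number);
        return 0;                    // a message line, not an error
      }
      *printed_anything = true;
      std::printf("select #%d: <plan row>\n", number);
      for (Unit *u : inner)          // recurse into inner subqueries
        if (int res = u->print_explain(printed_anything))
          return res;
      return 0;
    }

    int main() {
      Select child = {2, true, {}};
      Unit sub = {{&child}};
      Select top = {1, true, {&sub}};
      Unit query = {{&top}};
      bool printed = false;
      return query.print_explain(&printed);
    }
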
/**
A routine used by the parser to decide whether we are specifying a full
partitioning or only partitions to add or to split.
@@ -4308,6 +4442,3 @@ void binlog_unsafe_map_init()
}
#endif
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-template class Mem_root_array<ORDER*, true>;
-#endif
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 2f3214646de..be7cdd8ca1d 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -156,7 +156,8 @@ enum enum_sql_command {
SQLCOM_FLUSH, SQLCOM_KILL, SQLCOM_ANALYZE,
SQLCOM_ROLLBACK, SQLCOM_ROLLBACK_TO_SAVEPOINT,
SQLCOM_COMMIT, SQLCOM_SAVEPOINT, SQLCOM_RELEASE_SAVEPOINT,
- SQLCOM_SLAVE_START, SQLCOM_SLAVE_STOP,
+ SQLCOM_SLAVE_START, SQLCOM_SLAVE_ALL_START,
+ SQLCOM_SLAVE_STOP, SQLCOM_SLAVE_ALL_STOP,
SQLCOM_BEGIN, SQLCOM_CHANGE_MASTER,
SQLCOM_RENAME_TABLE,
SQLCOM_RESET, SQLCOM_PURGE, SQLCOM_PURGE_BEFORE, SQLCOM_SHOW_BINLOGS,
@@ -193,6 +194,7 @@ enum enum_sql_command {
SQLCOM_SHOW_RELAYLOG_EVENTS,
SQLCOM_SHOW_USER_STATS, SQLCOM_SHOW_TABLE_STATS, SQLCOM_SHOW_INDEX_STATS,
SQLCOM_SHOW_CLIENT_STATS,
+ SQLCOM_SHOW_EXPLAIN,
/*
When a command is added here, be sure it's also added in mysqld.cc
@@ -286,6 +288,7 @@ struct LEX_MASTER_INFO
char *host, *user, *password, *log_file_name;
char *ssl_key, *ssl_cert, *ssl_ca, *ssl_capath, *ssl_cipher;
char *relay_log_name;
+ LEX_STRING connection_name;
ulonglong pos;
ulong relay_log_pos;
ulong server_id;
@@ -302,7 +305,7 @@ struct LEX_MASTER_INFO
{
bzero(this, sizeof(*this));
my_init_dynamic_array(&repl_ignore_server_ids,
- sizeof(::server_id), 0, 16);
+ sizeof(::server_id), 0, 16, MYF(0));
}
void reset()
{
@@ -355,6 +358,8 @@ typedef uchar index_clause_map;
#define INDEX_HINT_MASK_ALL (INDEX_HINT_MASK_JOIN | INDEX_HINT_MASK_GROUP | \
INDEX_HINT_MASK_ORDER)
+class select_result_sink;
+
/* Single element of an USE/FORCE/IGNORE INDEX list specified as a SQL hint */
class Index_hint : public Sql_alloc
{
@@ -715,6 +720,8 @@ public:
friend int subselect_union_engine::exec();
List<Item> *get_unit_column_types();
+ int print_explain(select_result_sink *output, uint8 explain_flags,
+ bool *printed_anything);
};
typedef class st_select_lex_unit SELECT_LEX_UNIT;
@@ -775,6 +782,12 @@ public:
those converted to jtbm nests. The list is emptied when conversion is done.
*/
List<Item_in_subselect> sj_subselects;
+
+ /*
+ Needed to correctly generate 'PRIMARY' or 'SIMPLE' for select_type column
+ of EXPLAIN
+ */
+ bool have_merged_subqueries;
List<TABLE_LIST> leaf_tables;
List<TABLE_LIST> leaf_tables_exec;
@@ -999,7 +1012,7 @@ public:
bool is_part_of_union() { return master_unit()->is_union(); }
bool optimize_unflattened_subqueries(bool const_only);
/* Set the EXPLAIN type for this subquery. */
- void set_explain_type();
+ void set_explain_type(bool on_the_fly);
bool handle_derived(LEX *lex, uint phases);
void append_table_to_list(TABLE_LIST *TABLE_LIST::*link, TABLE_LIST *table);
bool get_free_table_map(table_map *map, uint *tablenr);
@@ -1023,8 +1036,10 @@ public:
bool save_leaf_tables(THD *thd);
bool save_prep_leaf_tables(THD *thd);
- bool is_merged_child_of(st_select_lex *ancestor);
+ bool is_merged_child_of(st_select_lex *ancestor);
+ int print_explain(select_result_sink *output, uint8 explain_flags,
+ bool *printed_anything);
/*
For MODE_ONLY_FULL_GROUP_BY we need to maintain two flags:
- Non-aggregated fields are used in this select.
@@ -2346,7 +2361,7 @@ struct LEX: public Query_tables_list
char *backup_dir; /* For RESTORE/BACKUP */
char* to_log; /* For PURGE MASTER LOGS TO */
char* x509_subject,*x509_issuer,*ssl_cipher;
- String *wild;
+ String *wild; /* Wildcard in SHOW {something} LIKE 'wild' */
sql_exchange *exchange;
select_result *result;
Item *default_value, *on_update_value;
@@ -2391,6 +2406,8 @@ struct LEX: public Query_tables_list
List<Item_func_set_user_var> set_var_list; // in-query assignment list
List<Item_param> param_list;
List<LEX_STRING> view_list; // view list (list of field names in view)
+ List<LEX_STRING> *column_list; // list of column names (in ANALYZE)
+ List<LEX_STRING> *index_list; // list of index names (in ANALYZE)
/*
A stack of name resolution contexts for the query. This stack is used
at parse time to set local name resolution contexts for various parts
@@ -2416,6 +2433,7 @@ struct LEX: public Query_tables_list
KEY_CREATE_INFO key_create_info;
LEX_MASTER_INFO mi; // used by CHANGE MASTER
LEX_SERVER_OPTIONS server_options;
+ LEX_STRING relay_log_connection_name;
USER_RESOURCES mqh;
LEX_RESET_SLAVE reset_slave_info;
ulong type;
@@ -2450,6 +2468,7 @@ struct LEX: public Query_tables_list
this command.
*/
bool parse_vcol_expr;
+ bool with_persistent_for_clause; // uses PERSISTENT FOR clause (in ANALYZE)
enum SSL_type ssl_type; /* defined in violite.h */
enum enum_duplicates duplicates;
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 6c27a9d123a..6a4712ca5b5 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -273,7 +273,6 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
for (field=table->field; *field ; field++)
fields_vars.push_back(new Item_field(*field));
bitmap_set_all(table->write_set);
- table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
/*
Let us also prepare SET clause, although it is probably empty
in this case.
@@ -289,21 +288,9 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
setup_fields(thd, 0, set_fields, MARK_COLUMNS_WRITE, 0, 0) ||
check_that_all_fields_are_given_values(thd, table, table_list))
DBUG_RETURN(TRUE);
- /*
- Check whenever TIMESTAMP field with auto-set feature specified
- explicitly.
- */
- if (table->timestamp_field)
- {
- if (bitmap_is_set(table->write_set,
- table->timestamp_field->field_index))
- table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
- else
- {
- bitmap_set_bit(table->write_set,
- table->timestamp_field->field_index);
- }
- }
+ /* Add all fields with default functions to table->write_set. */
+ if (table->default_field)
+ table->mark_default_fields_for_write();
/* Fix the expressions in SET clause */
if (setup_fields(thd, 0, set_values, MARK_COLUMNS_READ, 0, 0))
DBUG_RETURN(TRUE);
@@ -375,11 +362,11 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
MY_RETURN_REAL_PATH);
}
- if (thd->slave_thread)
+ if (thd->rli_slave)
{
#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
- if (strncmp(active_mi->rli.slave_patternload_file, name,
- active_mi->rli.slave_patternload_file_size))
+ if (strncmp(thd->rli_slave->slave_patternload_file, name,
+ thd->rli_slave->slave_patternload_file_size))
{
/*
LOAD DATA INFILE in the slave SQL Thread can only read from
@@ -849,8 +836,12 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
ER_WARN_TOO_FEW_RECORDS,
ER(ER_WARN_TOO_FEW_RECORDS),
thd->warning_info->current_row_for_warning());
+ /*
+ Timestamp fields that are NOT NULL are auto-updated if there is no
+ corresponding value in the data file.
+ */
if (!field->maybe_null() && field->type() == FIELD_TYPE_TIMESTAMP)
- ((Field_timestamp*) field)->set_time();
+ field->set_time();
}
else
{
@@ -865,6 +856,8 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if ((pos+=length) > read_info.row_end)
pos= read_info.row_end; /* Fills rest with space */
}
+ /* Do not auto-update this field. */
+ field->set_has_explicit_value();
}
if (pos != read_info.row_end)
{
@@ -876,10 +869,10 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
}
if (thd->killed ||
- fill_record_n_invoke_before_triggers(thd, set_fields, set_values,
+ fill_record_n_invoke_before_triggers(thd, table, set_fields, set_values,
ignore_check_option_errors,
- table->triggers,
- TRG_EVENT_INSERT))
+ TRG_EVENT_INSERT) ||
+ (table->default_field && table->update_default_fields()))
DBUG_RETURN(1);
switch (table_list->view_check_option(thd,
@@ -996,12 +989,18 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
field->set_null();
if (!field->maybe_null())
{
+ /*
+ Timestamp fields that are NOT NULL are auto-updated if there is no
+ corresponding value in the data file.
+ */
if (field->type() == MYSQL_TYPE_TIMESTAMP)
- ((Field_timestamp*) field)->set_time();
+ field->set_time();
else if (field != table->next_number_field)
field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARN_NULL_TO_NOTNULL, 1);
}
+ /* Do not auto-update this field. */
+ field->set_has_explicit_value();
}
else if (item->type() == Item::STRING_ITEM)
{
@@ -1025,6 +1024,7 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (field == table->next_number_field)
table->auto_increment_field_not_null= TRUE;
field->store((char*) pos, length, read_info.read_charset);
+ field->set_has_explicit_value();
}
else if (item->type() == Item::STRING_ITEM)
{
@@ -1066,7 +1066,8 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
DBUG_RETURN(1);
}
if (!field->maybe_null() && field->type() == FIELD_TYPE_TIMESTAMP)
- ((Field_timestamp*) field)->set_time();
+ field->set_time();
+ field->set_has_explicit_value();
/*
TODO: We probably should not throw warning for each field.
But how about intention to always have the same number
@@ -1093,10 +1094,10 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
}
if (thd->killed ||
- fill_record_n_invoke_before_triggers(thd, set_fields, set_values,
+ fill_record_n_invoke_before_triggers(thd, table, set_fields, set_values,
ignore_check_option_errors,
- table->triggers,
- TRG_EVENT_INSERT))
+ TRG_EVENT_INSERT) ||
+ (table->default_field && table->update_default_fields()))
DBUG_RETURN(1);
switch (table_list->view_check_option(thd,
@@ -1206,11 +1207,13 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (!field->maybe_null())
{
if (field->type() == FIELD_TYPE_TIMESTAMP)
- ((Field_timestamp *) field)->set_time();
+ field->set_time();
else if (field != table->next_number_field)
field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARN_NULL_TO_NOTNULL, 1);
}
+ /* Do not auto-update this field. */
+ field->set_has_explicit_value();
}
else
((Item_user_var_as_out_param *) item)->set_null_value(cs);
@@ -1225,6 +1228,7 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (field == table->next_number_field)
table->auto_increment_field_not_null= TRUE;
field->store((char *) tag->value.ptr(), tag->value.length(), cs);
+ field->set_has_explicit_value();
}
else
((Item_user_var_as_out_param *) item)->set_value(
@@ -1269,10 +1273,10 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
}
if (thd->killed ||
- fill_record_n_invoke_before_triggers(thd, set_fields, set_values,
+ fill_record_n_invoke_before_triggers(thd, table, set_fields, set_values,
ignore_check_option_errors,
- table->triggers,
- TRG_EVENT_INSERT))
+ TRG_EVENT_INSERT) ||
+ (table->default_field && table->update_default_fields()))
DBUG_RETURN(1);
switch (table_list->view_check_option(thd,
@@ -1367,7 +1371,7 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs,
set_if_bigger(length,line_start.length());
stack=stack_pos=(int*) sql_alloc(sizeof(int)*length);
- if (!(buffer=(uchar*) my_malloc(buff_length+1,MYF(0))))
+ if (!(buffer=(uchar*) my_malloc(buff_length+1,MYF(MY_THREAD_SPECIFIC))))
error=1; /* purecov: inspected */
else
{
@@ -1375,7 +1379,7 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs,
if (init_io_cache(&cache,(get_it_from_net) ? -1 : file, 0,
(get_it_from_net) ? READ_NET :
(is_fifo ? READ_FIFO : READ_CACHE),0L,1,
- MYF(MY_WME)))
+ MYF(MY_WME | MY_THREAD_SPECIFIC)))
{
my_free(buffer); /* purecov: inspected */
buffer= NULL;
@@ -1601,7 +1605,7 @@ int READ_INFO::read_field()
** We come here if buffer is too small. Enlarge it and continue
*/
if (!(new_buffer=(uchar*) my_realloc((char*) buffer,buff_length+1+IO_SIZE,
- MYF(MY_WME))))
+ MYF(MY_WME | MY_THREAD_SPECIFIC))))
return (error=1);
to=new_buffer + (to-buffer);
buffer=new_buffer;
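
The thread running through these LOAD DATA hunks: every column that actually
received a value from the input is tagged with set_has_explicit_value(), and
update_default_fields() afterwards evaluates default functions only for
untagged columns. A self-contained sketch of that bookkeeping, with invented
stand-in types:

    #include <cstdio>
    #include <vector>

    // Hypothetical sketch of the "explicit value" bookkeeping used by
    // LOAD DATA: fields read from the file are marked, and default
    // functions later run only for unmarked fields.
    struct Field {
      const char *name;
      bool has_explicit_value;
      bool has_default_function;     // e.g. DEFAULT CURRENT_TIMESTAMP
      void set_has_explicit_value() { has_explicit_value = true; }
    };

    static void update_default_fields(std::vector<Field> &fields) {
      for (Field &f : fields)
        if (f.has_default_function && !f.has_explicit_value)
          std::printf("auto-setting %s\n", f.name);  // set_time() in the server
    }

    int main() {
      std::vector<Field> fields = {
        {"a", false, false}, {"ts", false, true}
      };
      fields[0].set_has_explicit_value();   // column 'a' came from the file
      update_default_fields(fields);        // only 'ts' is auto-set
      return 0;
    }
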
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 6ea0dcbeb8e..b125047cb98 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -82,6 +82,7 @@
#include <myisam.h>
#include <my_dir.h>
#include "rpl_handler.h"
+#include "rpl_mi.h"
#include "sp_head.h"
#include "sp.h"
@@ -268,12 +269,14 @@ void init_update_queries(void)
CF_CAN_GENERATE_ROW_EVENTS;
sql_command_flags[SQLCOM_CREATE_INDEX]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_ALTER_TABLE]= CF_CHANGES_DATA | CF_WRITE_LOGS_COMMAND |
- CF_AUTO_COMMIT_TRANS | CF_REPORT_PROGRESS ;
+ CF_AUTO_COMMIT_TRANS | CF_REPORT_PROGRESS |
+ CF_INSERTS_DATA;
sql_command_flags[SQLCOM_TRUNCATE]= CF_CHANGES_DATA | CF_WRITE_LOGS_COMMAND |
CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_DROP_TABLE]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_LOAD]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
- CF_CAN_GENERATE_ROW_EVENTS | CF_REPORT_PROGRESS;
+ CF_CAN_GENERATE_ROW_EVENTS | CF_REPORT_PROGRESS |
+ CF_INSERTS_DATA;
sql_command_flags[SQLCOM_CREATE_DB]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_DROP_DB]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_ALTER_DB_UPGRADE]= CF_AUTO_COMMIT_TRANS;
@@ -290,21 +293,21 @@ void init_update_queries(void)
sql_command_flags[SQLCOM_DROP_EVENT]= CF_CHANGES_DATA | CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_UPDATE]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
- CF_CAN_GENERATE_ROW_EVENTS;
+ CF_CAN_GENERATE_ROW_EVENTS | CF_UPDATES_DATA;
sql_command_flags[SQLCOM_UPDATE_MULTI]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
- CF_CAN_GENERATE_ROW_EVENTS;
+ CF_CAN_GENERATE_ROW_EVENTS | CF_UPDATES_DATA;
sql_command_flags[SQLCOM_INSERT]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
- CF_CAN_GENERATE_ROW_EVENTS;
+ CF_CAN_GENERATE_ROW_EVENTS | CF_INSERTS_DATA;
sql_command_flags[SQLCOM_INSERT_SELECT]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
- CF_CAN_GENERATE_ROW_EVENTS;
+ CF_CAN_GENERATE_ROW_EVENTS | CF_INSERTS_DATA;
sql_command_flags[SQLCOM_DELETE]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS;
sql_command_flags[SQLCOM_DELETE_MULTI]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS;
sql_command_flags[SQLCOM_REPLACE]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
- CF_CAN_GENERATE_ROW_EVENTS;
+ CF_CAN_GENERATE_ROW_EVENTS | CF_INSERTS_DATA;
sql_command_flags[SQLCOM_REPLACE_SELECT]= CF_CHANGES_DATA | CF_REEXECUTION_FRAGILE |
- CF_CAN_GENERATE_ROW_EVENTS;
+ CF_CAN_GENERATE_ROW_EVENTS | CF_INSERTS_DATA;
sql_command_flags[SQLCOM_SELECT]= CF_REEXECUTION_FRAGILE |
CF_CAN_GENERATE_ROW_EVENTS;
sql_command_flags[SQLCOM_SET_OPTION]= CF_REEXECUTION_FRAGILE | CF_AUTO_COMMIT_TRANS;
@@ -335,6 +338,7 @@ void init_update_queries(void)
sql_command_flags[SQLCOM_SHOW_ENGINE_STATUS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_ENGINE_MUTEX]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_ENGINE_LOGS]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_EXPLAIN]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_PROCESSLIST]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_GRANTS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_CREATE_DB]= CF_STATUS_COMMAND;
@@ -350,7 +354,7 @@ void init_update_queries(void)
sql_command_flags[SQLCOM_SHOW_CREATE_EVENT]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_PROFILES]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_PROFILE]= CF_STATUS_COMMAND;
- sql_command_flags[SQLCOM_BINLOG_BASE64_EVENT]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_BINLOG_BASE64_EVENT]= CF_STATUS_COMMAND | CF_CAN_GENERATE_ROW_EVENTS;
sql_command_flags[SQLCOM_SHOW_CLIENT_STATS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_USER_STATS]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_TABLE_STATS]= CF_STATUS_COMMAND;
@@ -386,6 +390,19 @@ void init_update_queries(void)
sql_command_flags[SQLCOM_EXECUTE]= CF_CAN_GENERATE_ROW_EVENTS;
/*
+ We don't want to change to statement-based replication for these commands
+ */
+ sql_command_flags[SQLCOM_ROLLBACK]|= CF_FORCE_ORIGINAL_BINLOG_FORMAT;
+ /* We don't want to replicate ALTER TABLE for temp tables in row format */
+ sql_command_flags[SQLCOM_ALTER_TABLE]|= CF_FORCE_ORIGINAL_BINLOG_FORMAT;
+ /* We don't want to replicate TRUNCATE for temp tables in row format */
+ sql_command_flags[SQLCOM_TRUNCATE]|= CF_FORCE_ORIGINAL_BINLOG_FORMAT;
+ /* We don't want to replicate DROP for temp tables in row format */
+ sql_command_flags[SQLCOM_DROP_TABLE]|= CF_FORCE_ORIGINAL_BINLOG_FORMAT;
+ /* One can change replication mode with SET */
+ sql_command_flags[SQLCOM_SET_OPTION]|= CF_FORCE_ORIGINAL_BINLOG_FORMAT;
+
+ /*
The following admin table operations are allowed
on log tables.
*/
@@ -625,8 +642,6 @@ void do_handle_bootstrap(THD *thd)
handle_bootstrap_impl(thd);
end:
- net_end(&thd->net);
- thd->cleanup();
delete thd;
#ifndef EMBEDDED_LIBRARY
@@ -1287,17 +1302,21 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
and flushes tables.
*/
bool res;
- my_pthread_setspecific_ptr(THR_THD, NULL);
+ set_current_thd(0);
res= reload_acl_and_cache(NULL, options | REFRESH_FAST,
NULL, &not_used);
- my_pthread_setspecific_ptr(THR_THD, thd);
+ set_current_thd(thd);
if (res)
break;
}
else
#endif
- if (reload_acl_and_cache(thd, options, (TABLE_LIST*) 0, &not_used))
- break;
+ {
+ thd->lex->relay_log_connection_name.str= (char*) "";
+ thd->lex->relay_log_connection_name.length= 0;
+ if (reload_acl_and_cache(thd, options, (TABLE_LIST*) 0, &not_used))
+ break;
+ }
if (trans_commit_implicit(thd))
break;
close_thread_tables(thd);
@@ -1457,6 +1476,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thd_proc_info(thd, "cleaning up");
thd->reset_query();
+ thd->examined_row_count= 0; // For processlist
thd->command=COM_SLEEP;
dec_thread_running();
thd_proc_info(thd, 0);
@@ -2108,6 +2128,22 @@ mysql_execute_command(THD *thd)
DBUG_ASSERT(thd->transaction.stmt.modified_non_trans_table == FALSE);
+ /* store old value of binlog format */
+ enum_binlog_format orig_binlog_format,orig_current_stmt_binlog_format;
+
+ thd->get_binlog_format(&orig_binlog_format,
+ &orig_current_stmt_binlog_format);
+
+ /*
+ Force statement logging for DDL commands so that we can update
+ privilege, system or statistics tables directly without those updates
+ getting logged as row events.
+ */
+ if (!(sql_command_flags[lex->sql_command] &
+ (CF_CAN_GENERATE_ROW_EVENTS | CF_FORCE_ORIGINAL_BINLOG_FORMAT |
+ CF_STATUS_COMMAND)))
+ thd->set_binlog_format_stmt();
+
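
This is the opening half of a save/force/restore bracket; the matching restore
sits in the finish: hunk further down (the set_binlog_format() call). A
condensed sketch of the bracket, under the simplifying assumption that only
the format flag matters:

    #include <cassert>

    // Condensed sketch of the save/force/restore bracket around
    // mysql_execute_command(): DDL that can't generate row events runs
    // with statement format, then the caller's format is restored.
    enum BinlogFormat { BF_STATEMENT, BF_ROW, BF_MIXED };

    struct Session {
      BinlogFormat format = BF_ROW;
    };

    static void execute_command(Session *s, bool can_generate_row_events) {
      BinlogFormat saved = s->format;            // save
      if (!can_generate_row_events)
        s->format = BF_STATEMENT;                // force for DDL
      /* ... run the statement ... */
      if (!can_generate_row_events)
        s->format = saved;                       // restore at 'finish:'
    }

    int main() {
      Session s;
      execute_command(&s, /*can_generate_row_events=*/false);
      assert(s.format == BF_ROW);                // caller's setting preserved
      return 0;
    }
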
/*
End an active transaction so that this command will have its
own transaction and will also sync the binary log. If a DDL is
@@ -2154,6 +2190,33 @@ mysql_execute_command(THD *thd)
execute_show_status(thd, all_tables);
break;
}
+ case SQLCOM_SHOW_EXPLAIN:
+ {
+ if (!thd->security_ctx->priv_user[0] &&
+ check_global_access(thd,PROCESS_ACL))
+ break;
+
+ /*
+ The select should use only one table: the SHOW EXPLAIN pseudo-table
+ */
+ if (lex->sroutines.records || lex->query_tables->next_global)
+ {
+ my_message(ER_SET_CONSTANTS_ONLY, ER(ER_SET_CONSTANTS_ONLY),
+ MYF(0));
+ goto error;
+ }
+
+ Item **it= lex->value_list.head_ref();
+ if (!(*it)->basic_const_item() ||
+ (!(*it)->fixed && (*it)->fix_fields(lex->thd, it)) ||
+ (*it)->check_cols(1))
+ {
+ my_message(ER_SET_CONSTANTS_ONLY, ER(ER_SET_CONSTANTS_ONLY),
+ MYF(0));
+ goto error;
+ }
+ /* no break; fall through */
+ }
case SQLCOM_SHOW_DATABASES:
case SQLCOM_SHOW_TABLES:
case SQLCOM_SHOW_TRIGGERS:
@@ -2327,10 +2390,42 @@ case SQLCOM_PREPARE:
#ifdef HAVE_REPLICATION
case SQLCOM_CHANGE_MASTER:
{
+ LEX_MASTER_INFO *lex_mi= &thd->lex->mi;
+ Master_info *mi;
+ bool new_master= 0;
+
if (check_global_access(thd, SUPER_ACL))
goto error;
mysql_mutex_lock(&LOCK_active_mi);
- res = change_master(thd,active_mi);
+
+ mi= master_info_index->get_master_info(&lex_mi->connection_name,
+ MYSQL_ERROR::WARN_LEVEL_NOTE);
+
+ if (mi == NULL)
+ {
+ /* New replication connection: create a new Master_info */
+ mi= new Master_info(&lex_mi->connection_name, relay_log_recovery);
+ if (!mi || mi->error())
+ {
+ delete mi;
+ res= 1;
+ mysql_mutex_unlock(&LOCK_active_mi);
+ break;
+ }
+ new_master= 1;
+ }
+
+ res= change_master(thd, mi);
+ if (res && new_master)
+ {
+ /*
+ The new master was added by change_master(). Remove it as it didn't
+ work.
+ */
+ master_info_index->remove_master_info(&lex_mi->connection_name);
+ delete mi;
+ }
+
mysql_mutex_unlock(&LOCK_active_mi);
break;
}
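
The CHANGE MASTER case is a get-or-create with rollback: look the named
connection up, create a Master_info on a miss, and tear the new object down
again if change_master() fails. A schematic version of that flow (hypothetical
registry type, not the server's master_info_index):

    #include <map>
    #include <string>

    // Hypothetical sketch of get-or-create with rollback on failure,
    // mirroring the SQLCOM_CHANGE_MASTER flow above.
    struct MasterInfo { bool broken = false; };

    static std::map<std::string, MasterInfo*> registry;

    static bool change_master_for(const std::string &name) {
      bool created = false;
      MasterInfo *mi;
      auto it = registry.find(name);
      if (it != registry.end())
        mi = it->second;
      else {
        mi = new MasterInfo;                 // new connection
        registry[name] = mi;
        created = true;
      }
      bool failed = mi->broken;              // stands in for change_master()
      if (failed && created) {
        registry.erase(name);                // undo the half-made connection
        delete mi;
      }
      return failed;
    }

    int main() { return change_master_for("slave1") ? 1 : 0; }
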
@@ -2340,15 +2435,19 @@ case SQLCOM_PREPARE:
if (check_global_access(thd, SUPER_ACL | REPL_CLIENT_ACL))
goto error;
mysql_mutex_lock(&LOCK_active_mi);
- if (active_mi != NULL)
- {
- res = show_master_info(thd, active_mi);
- }
+
+ if (lex->verbose)
+ res= show_all_master_info(thd);
else
{
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- WARN_NO_MASTER_INFO, ER(WARN_NO_MASTER_INFO));
- my_ok(thd);
+ LEX_MASTER_INFO *lex_mi= &thd->lex->mi;
+ Master_info *mi;
+ mi= master_info_index->get_master_info(&lex_mi->connection_name,
+ MYSQL_ERROR::WARN_LEVEL_ERROR);
+ if (mi != NULL)
+ {
+ res= show_master_info(thd, mi, 0);
+ }
}
mysql_mutex_unlock(&LOCK_active_mi);
break;
@@ -2655,40 +2754,79 @@ end_with_restore_list:
#ifdef HAVE_REPLICATION
case SQLCOM_SLAVE_START:
{
+ LEX_MASTER_INFO* lex_mi= &thd->lex->mi;
+ Master_info *mi;
mysql_mutex_lock(&LOCK_active_mi);
- start_slave(thd,active_mi,1 /* net report*/);
+
+ if ((mi= (master_info_index->
+ get_master_info(&lex_mi->connection_name,
+ MYSQL_ERROR::WARN_LEVEL_ERROR))))
+ if (!start_slave(thd, mi, 1 /* net report*/))
+ my_ok(thd);
mysql_mutex_unlock(&LOCK_active_mi);
break;
}
case SQLCOM_SLAVE_STOP:
- /*
- If the client thread has locked tables, a deadlock is possible.
- Assume that
- - the client thread does LOCK TABLE t READ.
- - then the master updates t.
- - then the SQL slave thread wants to update t,
- so it waits for the client thread because t is locked by it.
+ {
+ LEX_MASTER_INFO *lex_mi;
+ Master_info *mi;
+ /*
+ If the client thread has locked tables, a deadlock is possible.
+ Assume that
+ - the client thread does LOCK TABLE t READ.
+ - then the master updates t.
+ - then the SQL slave thread wants to update t,
+ so it waits for the client thread because t is locked by it.
- then the client thread does SLAVE STOP.
SLAVE STOP waits for the SQL slave thread to terminate its
update of t, which waits for the client thread because t is locked by it.
- To prevent that, refuse SLAVE STOP if the
- client thread has locked tables
- */
- if (thd->locked_tables_mode ||
- thd->in_active_multi_stmt_transaction() || thd->global_read_lock.is_acquired())
+ To prevent that, refuse SLAVE STOP if the
+ client thread has locked tables
+ */
+ if (thd->locked_tables_mode ||
+ thd->in_active_multi_stmt_transaction() ||
+ thd->global_read_lock.is_acquired())
+ {
+ my_message(ER_LOCK_OR_ACTIVE_TRANSACTION,
+ ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));
+ goto error;
+ }
+
+ lex_mi= &thd->lex->mi;
+ mysql_mutex_lock(&LOCK_active_mi);
+ if ((mi= (master_info_index->
+ get_master_info(&lex_mi->connection_name,
+ MYSQL_ERROR::WARN_LEVEL_ERROR))))
+ if (!stop_slave(thd, mi, 1/* net report*/))
+ my_ok(thd);
+ mysql_mutex_unlock(&LOCK_active_mi);
+ break;
+ }
+ case SQLCOM_SLAVE_ALL_START:
{
- my_message(ER_LOCK_OR_ACTIVE_TRANSACTION,
- ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));
- goto error;
+ mysql_mutex_lock(&LOCK_active_mi);
+ if (!master_info_index->start_all_slaves(thd))
+ my_ok(thd);
+ mysql_mutex_unlock(&LOCK_active_mi);
+ break;
}
+ case SQLCOM_SLAVE_ALL_STOP:
{
+ if (thd->locked_tables_mode ||
+ thd->in_active_multi_stmt_transaction() ||
+ thd->global_read_lock.is_acquired())
+ {
+ my_message(ER_LOCK_OR_ACTIVE_TRANSACTION,
+ ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0));
+ goto error;
+ }
mysql_mutex_lock(&LOCK_active_mi);
- stop_slave(thd,active_mi,1/* net report*/);
+ if (!master_info_index->stop_all_slaves(thd))
+ my_ok(thd);
mysql_mutex_unlock(&LOCK_active_mi);
break;
}
#endif /* HAVE_REPLICATION */
-
case SQLCOM_RENAME_TABLE:
{
if (execute_rename_table(thd, first_table, all_tables))
@@ -4547,6 +4685,11 @@ finish:
if (lex->sql_command != SQLCOM_SET_OPTION && ! thd->in_sub_stmt)
DEBUG_SYNC(thd, "execute_command_after_close_tables");
#endif
+ if (!(sql_command_flags[lex->sql_command] &
+ (CF_CAN_GENERATE_ROW_EVENTS | CF_FORCE_ORIGINAL_BINLOG_FORMAT |
+ CF_STATUS_COMMAND)))
+ thd->set_binlog_format(orig_binlog_format,
+ orig_current_stmt_binlog_format);
if (stmt_causes_implicit_commit(thd, CF_IMPLICIT_COMMIT_END))
{
@@ -4667,7 +4810,8 @@ static bool execute_show_status(THD *thd, TABLE_LIST *all_tables)
mysql_mutex_lock(&LOCK_status);
add_diff_to_status(&global_status_var, &thd->status_var,
&old_status_var);
- thd->status_var= old_status_var;
+ memcpy(&thd->status_var, &old_status_var,
+ offsetof(STATUS_VAR, last_cleared_system_status_var));
mysql_mutex_unlock(&LOCK_status);
return res;
}
@@ -5835,6 +5979,7 @@ bool add_field_to_list(THD *thd, LEX_STRING *field_name, enum_field_types type,
{
register Create_field *new_field;
LEX *lex= thd->lex;
+ uint8 datetime_precision= length ? atoi(length) : 0;
DBUG_ENTER("add_field_to_list");
if (check_string_char_length(field_name, "", NAME_CHAR_LEN,
@@ -5871,11 +6016,13 @@ bool add_field_to_list(THD *thd, LEX_STRING *field_name, enum_field_types type,
no need fix_fields()
We allow only one function as part of default value -
- NOW() as default for TIMESTAMP type.
+ NOW() as default for TIMESTAMP and DATETIME types.
*/
if (default_value->type() == Item::FUNC_ITEM &&
- !(((Item_func*)default_value)->functype() == Item_func::NOW_FUNC &&
- type == MYSQL_TYPE_TIMESTAMP))
+ (static_cast<Item_func*>(default_value)->functype() !=
+ Item_func::NOW_FUNC ||
+ (mysql_type_to_time_type(type) != MYSQL_TIMESTAMP_DATETIME) ||
+ default_value->decimals < datetime_precision))
{
my_error(ER_INVALID_DEFAULT, MYF(0), field_name->str);
DBUG_RETURN(1);
@@ -5897,7 +6044,9 @@ bool add_field_to_list(THD *thd, LEX_STRING *field_name, enum_field_types type,
}
}
- if (on_update_value && type != MYSQL_TYPE_TIMESTAMP)
+ if (on_update_value &&
+ (mysql_type_to_time_type(type) != MYSQL_TIMESTAMP_DATETIME ||
+ on_update_value->decimals < datetime_precision))
{
my_error(ER_INVALID_ON_UPDATE, MYF(0), field_name->str);
DBUG_RETURN(1);
@@ -6582,23 +6731,17 @@ void add_join_natural(TABLE_LIST *a, TABLE_LIST *b, List<String> *using_fields,
/**
- kill on thread.
+ Find a thread by id and return it, locking its LOCK_thd_data
- @param thd Thread class
- @param id Thread id
- @param only_kill_query Should it kill the query or the connection
+ @param id Identifier of the thread we're looking for
- @note
- This is written such that we have a short lock on LOCK_thread_count
+ @return NULL - not found
+ pointer - thread found, and its LOCK_thd_data is locked.
*/
-uint kill_one_thread(THD *thd, ulong id, killed_state kill_signal)
+THD *find_thread_by_id(ulong id)
{
THD *tmp;
- uint error=ER_NO_SUCH_THREAD;
- DBUG_ENTER("kill_one_thread");
- DBUG_PRINT("enter", ("id: %lu signal: %u", id, (uint) kill_signal));
-
mysql_mutex_lock(&LOCK_thread_count); // For unlink from list
I_List_iterator<THD> it(threads);
while ((tmp=it++))
@@ -6612,7 +6755,29 @@ uint kill_one_thread(THD *thd, ulong id, killed_state kill_signal)
}
}
mysql_mutex_unlock(&LOCK_thread_count);
- if (tmp)
+ return tmp;
+}
+
+
+/**
+ Kill one thread.
+
+ @param thd Thread class
+ @param id Thread id
+ @param only_kill_query Should it kill the query or the connection
+
+ @note
+ This is written such that we have a short lock on LOCK_thread_count
+*/
+
+uint kill_one_thread(THD *thd, ulong id, killed_state kill_signal)
+{
+ THD *tmp;
+ uint error=ER_NO_SUCH_THREAD;
+ DBUG_ENTER("kill_one_thread");
+ DBUG_PRINT("enter", ("id: %lu signal: %u", id, (uint) kill_signal));
+
+ if ((tmp= find_thread_by_id(id)))
{
/*
If we're SUPER, we can KILL anything, including system-threads.
@@ -6700,7 +6865,7 @@ static uint kill_threads_for_user(THD *thd, LEX_USER *user,
mysql_mutex_unlock(&LOCK_thread_count);
DBUG_RETURN(ER_KILL_DENIED_ERROR);
}
- if (!threads_to_kill.push_back(tmp, tmp->mem_root))
+ if (!threads_to_kill.push_back(tmp, thd->mem_root))
mysql_mutex_lock(&tmp->LOCK_thd_data); // Lock from delete
}
}
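
find_thread_by_id() factors out a lock handoff: the global list mutex is held
only long enough to locate the target and acquire its per-thread
LOCK_thd_data, which is then returned still held to the caller. A compact
sketch of the idiom with std::mutex (invented types; the server uses its own
mysql_mutex_* wrappers):

    #include <list>
    #include <mutex>

    // Hypothetical sketch of the find-and-lock handoff in
    // find_thread_by_id(): hold the global list lock briefly, return the
    // target with its own lock held so the caller can act on it safely.
    struct Thd {
      unsigned long id;
      std::mutex lock_thd_data;
    };

    static std::list<Thd*> threads;
    static std::mutex lock_thread_count;

    static Thd *find_thread_by_id(unsigned long id) {
      std::lock_guard<std::mutex> g(lock_thread_count);  // short global lock
      for (Thd *t : threads) {
        if (t->id == id) {
          t->lock_thd_data.lock();   // handed to the caller still locked
          return t;
        }
      }
      return nullptr;
    }

    int main() {
      Thd t{42, {}};
      threads.push_back(&t);
      if (Thd *victim = find_thread_by_id(42)) {
        /* ... deliver the KILL signal here ... */
        victim->lock_thd_data.unlock();
      }
      return 0;
    }
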
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 2bec12e4f66..f042f028450 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -6526,9 +6526,6 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
lpt->pack_frm_data= NULL;
lpt->pack_frm_len= 0;
- /* Never update timestamp columns when alter */
- lpt->table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
-
if (table->file->alter_table_flags(alter_info->flags) &
HA_PARTITION_ONE_PHASE)
{
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index bf3537c0ed9..38d8dcf7994 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -175,7 +175,8 @@ static struct
/* we disable few other plugins by default */
{ "ndbcluster", PLUGIN_OFF },
{ "feedback", PLUGIN_OFF },
- { "pbxt", PLUGIN_OFF }
+ { "pbxt", PLUGIN_OFF },
+ { "cassandra", PLUGIN_OFF }
};
/* support for Services */
@@ -1042,6 +1043,8 @@ static bool plugin_add(MEM_ROOT *tmp_root,
struct st_maria_plugin *plugin;
uint oks= 0, errs= 0;
DBUG_ENTER("plugin_add");
+ DBUG_PRINT("enter", ("name: %s dl: %s", name->str, dl->str));
+
if (name->str && plugin_find_internal(name, MYSQL_ANY_PLUGIN))
{
report_error(report, ER_UDF_EXISTS, name->str);
@@ -1111,7 +1114,7 @@ static bool plugin_add(MEM_ROOT *tmp_root,
plugin_array_version++;
if (my_hash_insert(&plugin_hash[plugin->type], (uchar*)tmp_plugin_ptr))
tmp_plugin_ptr->state= PLUGIN_IS_FREED;
- init_alloc_root(&tmp_plugin_ptr->mem_root, 4096, 4096);
+ init_alloc_root(&tmp_plugin_ptr->mem_root, 4096, 4096, MYF(0));
if (name->str)
DBUG_RETURN(FALSE); // all done
@@ -1504,8 +1507,8 @@ int plugin_init(int *argc, char **argv, int flags)
init_plugin_psi_keys();
#endif
- init_alloc_root(&plugin_mem_root, 4096, 4096);
- init_alloc_root(&tmp_root, 4096, 4096);
+ init_alloc_root(&plugin_mem_root, 4096, 4096, MYF(0));
+ init_alloc_root(&tmp_root, 4096, 4096, MYF(0));
if (my_hash_init(&bookmark_hash, &my_charset_bin, 16, 0, 0,
get_bookmark_hash_key, NULL, HASH_UNIQUE))
@@ -1515,9 +1518,9 @@ int plugin_init(int *argc, char **argv, int flags)
mysql_mutex_init(key_LOCK_plugin, &LOCK_plugin, MY_MUTEX_INIT_FAST);
if (my_init_dynamic_array(&plugin_dl_array,
- sizeof(struct st_plugin_dl *),16,16) ||
+ sizeof(struct st_plugin_dl *), 16, 16, MYF(0)) ||
my_init_dynamic_array(&plugin_array,
- sizeof(struct st_plugin_int *),16,16))
+ sizeof(struct st_plugin_int *), 16, 16, MYF(0)))
goto err;
for (i= 0; i < MYSQL_MAX_PLUGIN_TYPE_NUM; i++)
@@ -1713,12 +1716,11 @@ static bool register_builtin(struct st_maria_plugin *plugin,
*/
static void plugin_load(MEM_ROOT *tmp_root, int *argc, char **argv)
{
- THD thd;
TABLE_LIST tables;
TABLE *table;
READ_RECORD read_record_info;
int error;
- THD *new_thd= &thd;
+ THD *new_thd= new THD;
bool result;
#ifdef EMBEDDED_LIBRARY
No_such_table_error_handler error_handler;
@@ -1729,7 +1731,7 @@ static void plugin_load(MEM_ROOT *tmp_root, int *argc, char **argv)
new_thd->store_globals();
new_thd->db= my_strdup("mysql", MYF(0));
new_thd->db_length= 5;
- bzero((char*) &thd.net, sizeof(thd.net));
+ bzero((char*) &new_thd->net, sizeof(new_thd->net));
tables.init_one_table("mysql", 5, "plugin", 6, "plugin", TL_READ);
#ifdef EMBEDDED_LIBRARY
@@ -1796,7 +1798,8 @@ static void plugin_load(MEM_ROOT *tmp_root, int *argc, char **argv)
close_mysql_tables(new_thd);
end:
/* Remember that we don't have a THD */
- my_pthread_setspecific_ptr(THR_THD, 0);
+ delete new_thd;
+ set_current_thd(0);
DBUG_VOID_RETURN;
}
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index c1f07581106..71b4a0cf817 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -2142,6 +2142,7 @@ static bool check_prepared_statement(Prepared_statement *stmt)
Note that we don't need to have cases in this list if they are
marked with CF_STATUS_COMMAND in sql_command_flags
*/
+ case SQLCOM_SHOW_EXPLAIN:
case SQLCOM_DROP_TABLE:
case SQLCOM_RENAME_TABLE:
case SQLCOM_ALTER_TABLE:
@@ -2159,6 +2160,8 @@ static bool check_prepared_statement(Prepared_statement *stmt)
case SQLCOM_FLUSH:
case SQLCOM_SLAVE_START:
case SQLCOM_SLAVE_STOP:
+ case SQLCOM_SLAVE_ALL_START:
+ case SQLCOM_SLAVE_ALL_STOP:
case SQLCOM_INSTALL_PLUGIN:
case SQLCOM_UNINSTALL_PLUGIN:
case SQLCOM_CREATE_DB:
@@ -3138,7 +3141,7 @@ Prepared_statement::Prepared_statement(THD *thd_arg)
flags((uint) IS_IN_USE)
{
init_sql_alloc(&main_mem_root, thd_arg->variables.query_alloc_block_size,
- thd_arg->variables.query_prealloc_size);
+ thd_arg->variables.query_prealloc_size, MYF(MY_THREAD_SPECIFIC));
*last_error= '\0';
}
@@ -4028,7 +4031,7 @@ Ed_result_set::Ed_result_set(List<Ed_row> *rows_arg,
*/
Ed_connection::Ed_connection(THD *thd)
- :m_warning_info(thd->query_id, false),
+ :m_warning_info(thd->query_id, false, true),
m_thd(thd),
m_rsets(0),
m_current_rset(0)
@@ -4452,7 +4455,7 @@ bool Protocol_local::send_result_set_metadata(List<Item> *columns, uint)
{
DBUG_ASSERT(m_rset == 0 && !alloc_root_inited(&m_rset_root));
- init_sql_alloc(&m_rset_root, MEM_ROOT_BLOCK_SIZE, 0);
+ init_sql_alloc(&m_rset_root, MEM_ROOT_BLOCK_SIZE, 0, MYF(MY_THREAD_SPECIFIC));
if (! (m_rset= new (&m_rset_root) List<Ed_row>))
return TRUE;
diff --git a/sql/sql_priv.h b/sql/sql_priv.h
index 6e778c09bd8..345556a0b7f 100644
--- a/sql/sql_priv.h
+++ b/sql/sql_priv.h
@@ -362,10 +362,12 @@ enum enum_yes_no_unknown
};
#ifdef MYSQL_SERVER
+
/*
External variables
*/
+
/* sql_yacc.cc */
#ifndef DBUG_OFF
extern void turn_parser_debug_on();
diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc
index 914b9026014..2720dc7cd74 100644
--- a/sql/sql_reload.cc
+++ b/sql/sql_reload.cc
@@ -26,6 +26,7 @@
#include "sql_repl.h" // reset_master, reset_slave
#include "rpl_mi.h" // Master_info::data_lock
#include "debug_sync.h"
+#include "rpl_mi.h"
static void disable_checkpoints(THD *thd);
@@ -96,7 +97,7 @@ bool reload_acl_and_cache(THD *thd, unsigned long options,
{
delete tmp_thd;
/* Remember that we don't have a THD */
- my_pthread_setspecific_ptr(THR_THD, 0);
+ set_current_thd(0);
thd= 0;
}
reset_mqh((LEX_USER *)NULL, TRUE);
@@ -157,10 +158,36 @@ bool reload_acl_and_cache(THD *thd, unsigned long options,
if (options & REFRESH_RELAY_LOG)
{
#ifdef HAVE_REPLICATION
- mysql_mutex_lock(&active_mi->data_lock);
- if (rotate_relay_log(active_mi))
- *write_to_binlog= -1;
- mysql_mutex_unlock(&active_mi->data_lock);
+ LEX_STRING connection_name;
+ Master_info *mi;
+ if (thd)
+ connection_name= thd->lex->relay_log_connection_name;
+ else
+ {
+ connection_name.str= (char*) "";
+ connection_name.length= 0;
+ }
+
+ /*
+ Writing this command to the binlog may cause problems as the
+ slave is not likely to have the same connection names.
+ */
+ tmp_write_to_binlog= 0;
+ mysql_mutex_lock(&LOCK_active_mi);
+ if (!(mi= (master_info_index->
+ get_master_info(&connection_name,
+ MYSQL_ERROR::WARN_LEVEL_ERROR))))
+ {
+ result= 1;
+ }
+ else
+ {
+ mysql_mutex_lock(&mi->data_lock);
+ if (rotate_relay_log(mi))
+ *write_to_binlog= -1;
+ mysql_mutex_unlock(&mi->data_lock);
+ }
+ mysql_mutex_unlock(&LOCK_active_mi);
#endif
}
#ifdef HAVE_QUERY_CACHE
@@ -314,13 +341,27 @@ bool reload_acl_and_cache(THD *thd, unsigned long options,
#ifdef HAVE_REPLICATION
if (options & REFRESH_SLAVE)
{
+ LEX_MASTER_INFO* lex_mi= &thd->lex->mi;
+ Master_info *mi;
tmp_write_to_binlog= 0;
mysql_mutex_lock(&LOCK_active_mi);
- if (reset_slave(thd, active_mi))
+
+ if (!(mi= (master_info_index->
+ get_master_info(&lex_mi->connection_name,
+ MYSQL_ERROR::WARN_LEVEL_ERROR))))
+ {
+ result= 1;
+ }
+ else if (reset_slave(thd, mi))
{
/* NOTE: my_error() has been already called by reset_slave(). */
result= 1;
}
+ else if (mi->connection_name.length && thd->lex->reset_slave_info.all)
+ {
+ /* If not default connection and 'all' is used */
+ master_info_index->remove_master_info(&mi->connection_name);
+ }
mysql_mutex_unlock(&LOCK_active_mi);
}
#endif
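
Both FLUSH RELAY LOGS and RESET SLAVE now resolve a connection name to a
Master_info, with the empty string naming the default connection (which is
what relay_log_connection_name is primed to in dispatch_command() earlier). A
sketch of such a keyed lookup (hypothetical container standing in for
master_info_index):

    #include <cstdio>
    #include <map>
    #include <string>

    // Hypothetical sketch: per-connection Master_info lookup, with ""
    // naming the default connection, as used by FLUSH RELAY LOGS above.
    struct MasterInfo { const char *relay_log; };

    static std::map<std::string, MasterInfo> master_info_index = {
      {"",       {"relay-bin"}},         // default connection
      {"slave2", {"relay-bin-slave2"}},
    };

    static MasterInfo *get_master_info(const std::string &connection_name) {
      auto it = master_info_index.find(connection_name);
      return it == master_info_index.end() ? nullptr : &it->second;
    }

    int main() {
      if (MasterInfo *mi = get_master_info(""))     // FLUSH RELAY LOGS
        std::printf("rotating %s\n", mi->relay_log);
      if (!get_master_info("nosuch"))
        std::printf("unknown connection: report an error\n");
      return 0;
    }
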
diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc
index 6b0d1e980f9..c91623cee6e 100644
--- a/sql/sql_rename.cc
+++ b/sql/sql_rename.cc
@@ -28,6 +28,7 @@
#include "lock.h" // MYSQL_OPEN_SKIP_TEMPORARY
#include "sql_base.h" // tdc_remove_table, lock_table_names,
#include "sql_handler.h" // mysql_ha_rm_tables
+#include "sql_statistics.h"
#include "datadict.h"
static TABLE_LIST *rename_tables(THD *thd, TABLE_LIST *table_list,
@@ -279,6 +280,12 @@ do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db, char *new_table_name,
ren_table->db, old_alias,
new_db, new_alias, 0)))
{
+ LEX_STRING db_name= { ren_table->db, ren_table->db_length };
+ LEX_STRING table_name= { ren_table->table_name,
+ ren_table->table_name_length };
+ LEX_STRING new_table= { (char *) new_alias, strlen(new_alias) };
+ (void) rename_table_in_stat_tables(thd, &db_name, &table_name,
+ &db_name, &new_table);
if ((rc= Table_triggers_list::change_table_name(thd, ren_table->db,
old_alias,
ren_table->table_name,
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 631825f0527..5b2e498cb00 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -34,14 +34,6 @@ my_bool opt_sporadic_binlog_dump_fail = 0;
static int binlog_dump_count = 0;
#endif
-/**
- a copy of active_mi->rli->slave_skip_counter, for showing in SHOW VARIABLES,
- INFORMATION_SCHEMA.GLOBAL_VARIABLES and @@sql_slave_skip_counter without
- taking all the mutexes needed to access active_mi->rli->slave_skip_counter
- properly.
-*/
-uint sql_slave_skip_counter;
-
extern TYPELIB binlog_checksum_typelib;
/*
@@ -492,6 +484,27 @@ static ulonglong get_heartbeat_period(THD * thd)
}
/*
+ Look up the capabilities of the slave, which it announces by setting a value
+ MARIA_SLAVE_CAPABILITY_XXX in @mariadb_slave_capability.
+
+ Older MariaDB slaves, and other MySQL slaves, do not set
+ @mariadb_slave_capability, corresponding to a capability of
+ MARIA_SLAVE_CAPABILITY_UNKNOWN (0).
+*/
+static int
+get_mariadb_slave_capability(THD *thd)
+{
+ bool null_value;
+ const LEX_STRING name= { C_STRING_WITH_LEN("mariadb_slave_capability") };
+ const user_var_entry *entry=
+ (user_var_entry*) my_hash_search(&thd->user_vars, (uchar*) name.str,
+ name.length);
+ return entry ?
+ (int)(entry->val_int(&null_value)) : MARIA_SLAVE_CAPABILITY_UNKNOWN;
+}
+
+
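
Capability negotiation here is deliberately low-tech: the slave sets a session
user variable before requesting the dump, and the master reads it back with a
hash lookup, treating absence as capability 0. The same read-with-default
pattern, sketched with a plain map in place of thd->user_vars (the capability
numbers below are illustrative, not the real constants):

    #include <map>
    #include <string>

    // Hypothetical sketch of get_mariadb_slave_capability(): read an
    // integer session variable, defaulting to 0 (UNKNOWN) when the slave
    // never set @mariadb_slave_capability.
    enum {
      CAP_UNKNOWN = 0,
      CAP_ANNOTATE = 2,
      CAP_TOLERATE_HOLES = 3,   // illustrative values only
    };

    static int slave_capability(const std::map<std::string, long long> &vars) {
      auto it = vars.find("mariadb_slave_capability");
      return it == vars.end() ? CAP_UNKNOWN : (int)it->second;
    }

    int main() {
      std::map<std::string, long long> vars;            // slave sent nothing
      int cap = slave_capability(vars);                 // CAP_UNKNOWN
      vars["mariadb_slave_capability"] = CAP_ANNOTATE;  // slave announces itself
      cap = slave_capability(vars);
      return cap == CAP_ANNOTATE ? 0 : 1;
    }
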
+/*
Function prepares and sends a replication heartbeat event.
@param net net object of THD
@@ -563,14 +576,68 @@ static int send_heartbeat_event(NET* net, String* packet,
static const char *
send_event_to_slave(THD *thd, NET *net, String* const packet, ushort flags,
Log_event_type event_type, char *log_file_name,
- IO_CACHE *log)
+ IO_CACHE *log, int mariadb_slave_capability,
+ ulong ev_offset, uint8 current_checksum_alg)
{
my_off_t pos;
/* Do not send annotate_rows events unless slave requested it. */
- if (event_type == ANNOTATE_ROWS_EVENT &&
- !(flags & BINLOG_SEND_ANNOTATE_ROWS_EVENT))
- return NULL;
+ if (event_type == ANNOTATE_ROWS_EVENT && !(flags & BINLOG_SEND_ANNOTATE_ROWS_EVENT))
+ {
+ if (mariadb_slave_capability >= MARIA_SLAVE_CAPABILITY_TOLERATE_HOLES)
+ {
+ /* This slave can tolerate events omitted from the binlog stream. */
+ return NULL;
+ }
+ else if (mariadb_slave_capability >= MARIA_SLAVE_CAPABILITY_ANNOTATE)
+ {
+ /*
+ The slave did not request ANNOTATE_ROWS_EVENT (it does not need them as
+ it will not log them in its own binary log). However, it understands the
+ event and will just ignore it, and it would break if we omitted it,
+ leaving a hole in the binlog stream. So just send the event as-is.
+ */
+ }
+ else
+ {
+ /*
+ The slave does not understand ANNOTATE_ROWS_EVENT.
+
+ Older MariaDB slaves (and MySQL slaves) will break replication if there
+ are holes in the binlog stream (they will miscompute the binlog offset
+ and request the wrong position when reconnecting).
+
+ So replace the event with a dummy event of the same size that will be
+ a no-operation on the slave.
+ */
+ if (Query_log_event::dummy_event(packet, ev_offset, current_checksum_alg))
+ return "Failed to replace row annotate event with dummy: too small event.";
+ }
+ }
+
+ /*
+ Do not send binlog checkpoint events to a slave that does not understand it.
+ */
+ if (unlikely(event_type == BINLOG_CHECKPOINT_EVENT) &&
+ mariadb_slave_capability < MARIA_SLAVE_CAPABILITY_BINLOG_CHECKPOINT)
+ {
+ if (mariadb_slave_capability >= MARIA_SLAVE_CAPABILITY_TOLERATE_HOLES)
+ {
+ /* This slave can tolerate events omitted from the binlog stream. */
+ return NULL;
+ }
+ else
+ {
+ /*
+ The slave does not understand BINLOG_CHECKPOINT_EVENT. Send a dummy
+ event instead, with the same length, so the slave does not get confused
+ about binlog positions.
+ */
+ if (Query_log_event::dummy_event(packet, ev_offset, current_checksum_alg))
+ return "Failed to replace binlog checkpoint event with dummy: "
+ "too small event.";
+ }
+ }
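
Both fallbacks above preserve one invariant: an event the slave cannot handle
may be replaced, but only by a dummy of exactly the same byte length, because
older slaves derive their reconnect position from the sizes of the events they
have received. A toy illustration of why the length must match (pure
arithmetic, no real event format):

    #include <cassert>

    // Toy model: a slave tracks its master binlog position by summing
    // event lengths. Replacing an event with a *same-size* dummy keeps
    // the position arithmetic intact; omitting it would leave a hole.
    int main() {
      int event_len[] = {98, 45, 60};        // sizes as written on the master
      long master_pos = 0, slave_pos = 0;
      for (int len : event_len) {
        master_pos += len;
        // event #1 replaced by a dummy: different content, identical length
        slave_pos += len;
      }
      assert(slave_pos == master_pos);       // reconnect position stays valid
      return 0;
    }
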
/*
Skip events with the @@skip_replication flag set, if slave requested
@@ -628,6 +695,7 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos,
NET* net = &thd->net;
mysql_mutex_t *log_lock;
mysql_cond_t *log_cond;
+ int mariadb_slave_capability;
uint8 current_checksum_alg= BINLOG_CHECKSUM_ALG_UNDEF;
int old_max_allowed_packet= thd->variables.max_allowed_packet;
@@ -653,6 +721,7 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos,
heartbeat_ts= &heartbeat_buf;
set_timespec_nsec(*heartbeat_ts, 0);
}
+ mariadb_slave_capability= get_mariadb_slave_capability(thd);
if (global_system_variables.log_warnings > 1)
sql_print_information("Start binlog_dump to slave_server(%d), pos(%s, %lu)",
thd->server_id, log_ident, (ulong)pos);
@@ -939,7 +1008,9 @@ impossible position";
}
if ((tmp_msg= send_event_to_slave(thd, net, packet, flags, event_type,
- log_file_name, &log)))
+ log_file_name, &log,
+ mariadb_slave_capability, ev_offset,
+ current_checksum_alg)))
{
errmsg= tmp_msg;
my_errno= ER_UNKNOWN_ERROR;
@@ -1097,7 +1168,9 @@ impossible position";
if (read_packet &&
(tmp_msg= send_event_to_slave(thd, net, packet, flags, event_type,
- log_file_name, &log)))
+ log_file_name, &log,
+ mariadb_slave_capability, ev_offset,
+ current_checksum_alg)))
{
errmsg= tmp_msg;
my_errno= ER_UNKNOWN_ERROR;
@@ -1224,15 +1297,28 @@ err:
@retval 0 success
@retval 1 error
+ @retval -1 fatal error
*/
+
int start_slave(THD* thd , Master_info* mi, bool net_report)
{
int slave_errno= 0;
int thread_mask;
+ char master_info_file_tmp[FN_REFLEN];
+ char relay_log_info_file_tmp[FN_REFLEN];
DBUG_ENTER("start_slave");
if (check_access(thd, SUPER_ACL, any_db, NULL, NULL, 0, 0))
- DBUG_RETURN(1);
+ DBUG_RETURN(-1);
+
+ create_logfile_name_with_suffix(master_info_file_tmp,
+ sizeof(master_info_file_tmp),
+ master_info_file, 0, &mi->connection_name);
+ create_logfile_name_with_suffix(relay_log_info_file_tmp,
+ sizeof(relay_log_info_file_tmp),
+ relay_log_info_file, 0,
+ &mi->connection_name);
+
lock_slave_threads(mi); // this allows us to cleanly read slave_running
// Get a mask of _stopped_ threads
init_thread_mask(&thread_mask,mi,1 /* inverse */);
@@ -1246,7 +1332,7 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
thread_mask&= thd->lex->slave_thd_opt;
if (thread_mask) //some threads are stopped, start them
{
- if (init_master_info(mi,master_info_file,relay_log_info_file, 0,
+ if (init_master_info(mi,master_info_file_tmp,relay_log_info_file_tmp, 0,
thread_mask))
slave_errno=ER_MASTER_INFO;
else if (server_id_supplied && *mi->host)
@@ -1320,10 +1406,11 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
if (!slave_errno)
slave_errno = start_slave_threads(0 /*no mutex */,
- 1 /* wait for start */,
- mi,
- master_info_file,relay_log_info_file,
- thread_mask);
+ 1 /* wait for start */,
+ mi,
+ master_info_file_tmp,
+ relay_log_info_file_tmp,
+ thread_mask);
}
else
slave_errno = ER_BAD_SLAVE;
@@ -1340,11 +1427,11 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
if (slave_errno)
{
if (net_report)
- my_message(slave_errno, ER(slave_errno), MYF(0));
- DBUG_RETURN(1);
+ my_error(slave_errno, MYF(0),
+ (int) mi->connection_name.length,
+ mi->connection_name.str);
+ DBUG_RETURN(slave_errno == ER_BAD_SLAVE ? -1 : 1);
}
- else if (net_report)
- my_ok(thd);
DBUG_RETURN(0);
}
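
start_slave() (and the reset_slave()/change_master() hunks below) derive
per-connection state-file names by suffixing the configured base name with the
connection name, so the default connection keeps plain master.info while a
named connection gets its own file. A sketch of the naming idea (the exact
format produced by create_logfile_name_with_suffix() may differ; this shows
the shape):

    #include <cstdio>
    #include <string>

    // Hypothetical sketch of per-connection state-file naming: the
    // default connection ("") keeps the plain base name, a named
    // connection gets a suffixed file. The real function also handles
    // escaping and length limits.
    static std::string logfile_name(const std::string &base,
                                    const std::string &connection_name) {
      if (connection_name.empty())
        return base;                          // default connection
      return base + "-" + connection_name;    // assumed separator
    }

    int main() {
      std::printf("%s\n", logfile_name("master.info", "").c_str());
      std::printf("%s\n", logfile_name("master.info", "slave2").c_str());
      std::printf("%s\n", logfile_name("relay-log.info", "slave2").c_str());
      return 0;
    }
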
@@ -1362,17 +1449,17 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
@retval 0 success
@retval 1 error
+ @retval -1 error
*/
+
int stop_slave(THD* thd, Master_info* mi, bool net_report )
{
- DBUG_ENTER("stop_slave");
-
int slave_errno;
- if (!thd)
- thd = current_thd;
+ DBUG_ENTER("stop_slave");
+ DBUG_PRINT("enter",("Connection: %s", mi->connection_name.str));
if (check_access(thd, SUPER_ACL, any_db, NULL, NULL, 0, 0))
- DBUG_RETURN(1);
+ DBUG_RETURN(-1);
thd_proc_info(thd, "Killing slave");
int thread_mask;
lock_slave_threads(mi);
@@ -1408,8 +1495,6 @@ int stop_slave(THD* thd, Master_info* mi, bool net_report )
my_message(slave_errno, ER(slave_errno), MYF(0));
DBUG_RETURN(1);
}
- else if (net_report)
- my_ok(thd);
DBUG_RETURN(0);
}
@@ -1433,15 +1518,18 @@ int reset_slave(THD *thd, Master_info* mi)
int thread_mask= 0, error= 0;
uint sql_errno=ER_UNKNOWN_ERROR;
const char* errmsg= "Unknown error occurred while resetting slave";
+ char master_info_file_tmp[FN_REFLEN];
+ char relay_log_info_file_tmp[FN_REFLEN];
DBUG_ENTER("reset_slave");
lock_slave_threads(mi);
init_thread_mask(&thread_mask,mi,0 /* not inverse */);
if (thread_mask) // We refuse if any slave thread is running
{
- sql_errno= ER_SLAVE_MUST_STOP;
- error=1;
- goto err;
+ unlock_slave_threads(mi);
+ my_error(ER_SLAVE_MUST_STOP, MYF(0), (int) mi->connection_name.length,
+ mi->connection_name.str);
+ DBUG_RETURN(ER_SLAVE_MUST_STOP);
}
ha_reset_slave(thd);
@@ -1468,22 +1556,35 @@ int reset_slave(THD *thd, Master_info* mi)
// close master_info_file, relay_log_info_file, set mi->inited=rli->inited=0
end_master_info(mi);
+
// and delete these two files
- fn_format(fname, master_info_file, mysql_data_home, "", 4+32);
+ create_logfile_name_with_suffix(master_info_file_tmp,
+ sizeof(master_info_file_tmp),
+ master_info_file, 0, &mi->connection_name);
+ create_logfile_name_with_suffix(relay_log_info_file_tmp,
+ sizeof(relay_log_info_file_tmp),
+ relay_log_info_file, 0, &mi->connection_name);
+
+ fn_format(fname, master_info_file_tmp, mysql_data_home, "", 4+32);
if (mysql_file_stat(key_file_master_info, fname, &stat_area, MYF(0)) &&
mysql_file_delete(key_file_master_info, fname, MYF(MY_WME)))
{
error=1;
goto err;
}
+ else if (global_system_variables.log_warnings > 1)
+ sql_print_information("Deleted Master_info file '%s'.", fname);
+
// delete relay_log_info_file
- fn_format(fname, relay_log_info_file, mysql_data_home, "", 4+32);
+ fn_format(fname, relay_log_info_file_tmp, mysql_data_home, "", 4+32);
if (mysql_file_stat(key_file_relay_log_info, fname, &stat_area, MYF(0)) &&
mysql_file_delete(key_file_relay_log_info, fname, MYF(MY_WME)))
{
error=1;
goto err;
}
+ else if (global_system_variables.log_warnings > 1)
+ sql_print_information("Deleted Master_info file '%s'.", fname);
RUN_HOOK(binlog_relay_io, after_reset_slave, (thd, mi));
err:
@@ -1588,20 +1689,12 @@ bool change_master(THD* thd, Master_info* mi)
char saved_host[HOSTNAME_LENGTH + 1];
uint saved_port;
char saved_log_name[FN_REFLEN];
+ char master_info_file_tmp[FN_REFLEN];
+ char relay_log_info_file_tmp[FN_REFLEN];
my_off_t saved_log_pos;
- DBUG_ENTER("change_master");
-
- lock_slave_threads(mi);
- init_thread_mask(&thread_mask,mi,0 /*not inverse*/);
LEX_MASTER_INFO* lex_mi= &thd->lex->mi;
- if (thread_mask) // We refuse if any slave thread is running
- {
- my_message(ER_SLAVE_MUST_STOP, ER(ER_SLAVE_MUST_STOP), MYF(0));
- ret= TRUE;
- goto err;
- }
+ DBUG_ENTER("change_master");
- thd_proc_info(thd, "Changing master");
/*
We need to check if there is an empty master_host. Otherwise
change master succeeds, a master.info file is created containing
@@ -1609,17 +1702,61 @@ bool change_master(THD* thd, Master_info* mi)
is thrown stating that the server is not configured as slave.
(See BUG#28796).
*/
- if(lex_mi->host && !*lex_mi->host)
+ if (lex_mi->host && !*lex_mi->host)
{
my_error(ER_WRONG_ARGUMENTS, MYF(0), "MASTER_HOST");
- unlock_slave_threads(mi);
DBUG_RETURN(TRUE);
}
- // TODO: see if needs re-write
- if (init_master_info(mi, master_info_file, relay_log_info_file, 0,
+ if (master_info_index->check_duplicate_master_info(&lex_mi->connection_name,
+ lex_mi->host,
+ lex_mi->port))
+ DBUG_RETURN(TRUE);
+
+ lock_slave_threads(mi);
+ init_thread_mask(&thread_mask,mi,0 /*not inverse*/);
+ if (thread_mask) // We refuse if any slave thread is running
+ {
+ my_error(ER_SLAVE_MUST_STOP, MYF(0), (int) mi->connection_name.length,
+ mi->connection_name.str);
+ ret= TRUE;
+ goto err;
+ }
+
+ thd_proc_info(thd, "Changing master");
+
+ create_logfile_name_with_suffix(master_info_file_tmp,
+ sizeof(master_info_file_tmp),
+ master_info_file, 0, &mi->connection_name);
+ create_logfile_name_with_suffix(relay_log_info_file_tmp,
+ sizeof(relay_log_info_file_tmp),
+ relay_log_info_file, 0, &mi->connection_name);
+
+ /* If the new Master_info doesn't exist, add it */
+ if (!master_info_index->get_master_info(&mi->connection_name,
+ MYSQL_ERROR::WARN_LEVEL_NOTE))
+ {
+ if (master_info_index->add_master_info(mi, TRUE))
+ {
+ my_error(ER_MASTER_INFO, MYF(0),
+ (int) lex_mi->connection_name.length,
+ lex_mi->connection_name.str);
+ ret= TRUE;
+ goto err;
+ }
+ }
+ if (global_system_variables.log_warnings > 1)
+ sql_print_information("Master: '%.*s' Master_info_file: '%s' "
+ "Relay_info_file: '%s'",
+ (int) mi->connection_name.length,
+ mi->connection_name.str,
+ master_info_file_tmp, relay_log_info_file_tmp);
+
+ if (init_master_info(mi, master_info_file_tmp, relay_log_info_file_tmp, 0,
thread_mask))
{
- my_message(ER_MASTER_INFO, ER(ER_MASTER_INFO), MYF(0));
+ my_error(ER_MASTER_INFO, MYF(0),
+ (int) lex_mi->connection_name.length,
+ lex_mi->connection_name.str);
ret= TRUE;
goto err;
}
@@ -1888,7 +2025,7 @@ int reset_master(THD* thd)
return 1;
}
- if (mysql_bin_log.reset_logs(thd))
+ if (mysql_bin_log.reset_logs(thd, 1))
return 1;
RUN_HOOK(binlog_transmit, after_reset_master, (thd, 0 /* flags */));
return 0;
@@ -1914,6 +2051,7 @@ bool mysql_show_binlog_events(THD* thd)
File file = -1;
MYSQL_BIN_LOG *binary_log= NULL;
int old_max_allowed_packet= thd->variables.max_allowed_packet;
+ Master_info *mi= 0;
LOG_INFO linfo;
DBUG_ENTER("mysql_show_binlog_events");
@@ -1943,10 +2081,15 @@ bool mysql_show_binlog_events(THD* thd)
}
else /* showing relay log contents */
{
- if (!active_mi)
+ mysql_mutex_lock(&LOCK_active_mi);
+ if (!(mi= master_info_index->
+ get_master_info(&thd->variables.default_master_connection,
+ MYSQL_ERROR::WARN_LEVEL_ERROR)))
+ {
+ mysql_mutex_unlock(&LOCK_active_mi);
DBUG_RETURN(TRUE);
-
- binary_log= &(active_mi->rli.relay_log);
+ }
+ binary_log= &(mi->rli.relay_log);
}
if (binary_log->is_open())
@@ -1960,6 +2103,13 @@ bool mysql_show_binlog_events(THD* thd)
mysql_mutex_t *log_lock = binary_log->get_log_lock();
Log_event* ev;
+ if (mi)
+ {
+ /* We can unlock the mutex as we have a lock on the file */
+ mysql_mutex_unlock(&LOCK_active_mi);
+ mi= 0;
+ }
+
unit->set_limit(thd->lex->current_select);
limit_start= unit->offset_limit_cnt;
limit_end= unit->select_limit_cnt;
@@ -2054,6 +2204,9 @@ bool mysql_show_binlog_events(THD* thd)
mysql_mutex_unlock(log_lock);
}
+ else if (mi)
+ mysql_mutex_unlock(&LOCK_active_mi);
+
// Check that linfo is still on the function scope.
DEBUG_SYNC(thd, "after_show_binlog_events");
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 4fb05fe71c8..41c0b07bc49 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -51,6 +51,7 @@
#include "opt_subselect.h"
#include "log_slow.h"
#include "sql_derived.h"
+#include "sql_statistics.h"
#include "debug_sync.h" // DEBUG_SYNC
#include <m_ctype.h>
@@ -277,6 +278,57 @@ JOIN_TAB *first_breadth_first_tab(JOIN *join, enum enum_exec_or_opt tabs_kind);
JOIN_TAB *next_breadth_first_tab(JOIN *join, enum enum_exec_or_opt tabs_kind,
JOIN_TAB *tab);
+#ifndef DBUG_OFF
+
+/*
+ SHOW EXPLAIN testing: wait for, and serve n_calls APC requests.
+*/
+void dbug_serve_apcs(THD *thd, int n_calls)
+{
+ const char *save_proc_info= thd->proc_info;
+ /* This is so that mysqltest knows we're ready to serve requests: */
+ thd_proc_info(thd, "show_explain_trap");
+
+ /* Busy-wait for n_calls APC requests to arrive and be processed */
+ int n_apcs= thd->apc_target.n_calls_processed + n_calls;
+ while (thd->apc_target.n_calls_processed < n_apcs)
+ {
+ my_sleep(300);
+ if (thd->check_killed())
+ break;
+ }
+ thd_proc_info(thd, save_proc_info);
+}
+
+
+/*
+ Debugging: check if @name=value, comparing as integer
+
+ Intended usage:
+
+ DBUG_EXECUTE_IF("show_explain_probe_2",
+ if (dbug_user_var_equals_int(thd, "select_id", select_id))
+ dbug_serve_apcs(thd, 1);
+ );
+
+*/
+
+bool dbug_user_var_equals_int(THD *thd, const char *name, int value)
+{
+ user_var_entry *var;
+ LEX_STRING varname= {(char*)name, strlen(name)};
+ if ((var= get_variable(&thd->user_vars, varname, FALSE)))
+ {
+ bool null_value;
+ longlong var_value= var->val_int(&null_value);
+ if (!null_value && var_value == value)
+ return TRUE;
+ }
+ return FALSE;
+}
+#endif
+
+
/**
This handles SELECT with and without UNION.
*/
@@ -934,6 +986,19 @@ err:
}
+int JOIN::optimize()
+{
+ int res= optimize_inner();
+ /*
+ If we're inside a non-correlated subquery, this function may be
+ called for the second time after the subquery has been executed
+ and deleted. The second call will not produce a valid query plan; it
+ will short-circuit because optimized==TRUE.
+ */
+ if (!res && have_query_plan != QEP_DELETED)
+ have_query_plan= QEP_AVAILABLE;
+ return res;
+}
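
have_query_plan is a small lifecycle that SHOW EXPLAIN consults:
QEP_NOT_PRESENT_YET until optimization completes, QEP_AVAILABLE afterwards,
QEP_DELETED once the plan is freed, and the comment above notes that DELETED
must stay sticky across a re-entry of optimize(). A minimal sketch of that
state machine (the enum names mirror the JOIN::QEP_* values used above):

    // Minimal sketch of the query-plan lifecycle consulted by SHOW EXPLAIN.
    enum QepState { QEP_NOT_PRESENT_YET, QEP_AVAILABLE, QEP_DELETED };

    struct Join {
      QepState have_query_plan = QEP_NOT_PRESENT_YET;
      bool optimized = false;

      int optimize_inner() {                 // stands in for the real work
        if (optimized) return 0;             // second call short-circuits
        optimized = true;
        return 0;
      }
      int optimize() {
        int res = optimize_inner();
        if (!res && have_query_plan != QEP_DELETED)   // DELETED is sticky
          have_query_plan = QEP_AVAILABLE;
        return res;
      }
      void cleanup() { have_query_plan = QEP_DELETED; }
    };

    int main() {
      Join j;
      j.optimize();          // -> QEP_AVAILABLE: SHOW EXPLAIN may print it
      j.cleanup();           // -> QEP_DELETED: "Query plan already deleted"
      j.optimize();          // re-entry keeps QEP_DELETED
      return j.have_query_plan == QEP_DELETED ? 0 : 1;
    }
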
/**
global select optimisation.
@@ -947,7 +1012,7 @@ err:
*/
int
-JOIN::optimize()
+JOIN::optimize_inner()
{
ulonglong select_opts_for_readinfo;
uint no_jbuf_after;
@@ -1437,9 +1502,10 @@ JOIN::optimize()
We have found that grouping can be removed since groups correspond to
only one row anyway, but we still have to guarantee correct result
order. The line below effectively rewrites the query from GROUP BY
- <fields> to ORDER BY <fields>. There are two exceptions:
+ <fields> to ORDER BY <fields>. There are three exceptions:
- if skip_sort_order is set (see above), then we can simply skip
GROUP BY;
+ - if we are in a subquery, we don't have to maintain order
- we can only rewrite ORDER BY if the ORDER BY fields are 'compatible'
with the GROUP BY ones, i.e. either one is a prefix of another.
We only check if the ORDER BY is a prefix of GROUP BY. In this case
@@ -1449,7 +1515,13 @@ JOIN::optimize()
'order' as is.
*/
if (!order || test_if_subpart(group_list, order))
- order= skip_sort_order ? 0 : group_list;
+ {
+ if (skip_sort_order ||
+ select_lex->master_unit()->item) // This is a subquery
+ order= NULL;
+ else
+ order= group_list;
+ }
/*
If we have an IGNORE INDEX FOR GROUP BY(fields) clause, this must be
rewritten to IGNORE INDEX FOR ORDER BY(fields).
@@ -1862,6 +1934,7 @@ int JOIN::init_execution()
if (!group_list && ! exec_tmp_table1->distinct && order && simple_order)
{
+ DBUG_PRINT("info",("Sorting for order"));
thd_proc_info(thd, "Sorting for order");
if (create_sort_index(thd, this, order,
HA_POS_ERROR, HA_POS_ERROR, TRUE))
@@ -2171,6 +2244,32 @@ JOIN::save_join_tab()
}
+void JOIN::exec()
+{
+ /*
+ Enable SHOW EXPLAIN only if we're in the top-level query.
+ */
+ thd->apc_target.enable();
+ DBUG_EXECUTE_IF("show_explain_probe_join_exec_start",
+ if (dbug_user_var_equals_int(thd,
+ "show_explain_probe_select_id",
+ select_lex->select_number))
+ dbug_serve_apcs(thd, 1);
+ );
+
+ exec_inner();
+
+ DBUG_EXECUTE_IF("show_explain_probe_join_exec_end",
+ if (dbug_user_var_equals_int(thd,
+ "show_explain_probe_select_id",
+ select_lex->select_number))
+ dbug_serve_apcs(thd, 1);
+ );
+
+ thd->apc_target.disable();
+}
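
JOIN::exec() brackets the real work with apc_target.enable()/disable(): a SHOW
EXPLAIN issued from another connection is serviced only inside that window,
which is also where the DBUG probes park the thread for the test suite. A
rough sketch of the enable-execute-disable shape (simplified; the real
Apc_target queues requests and runs them at kill-check points):

    #include <atomic>
    #include <cstdio>

    // Rough sketch of the APC window around JOIN::exec(): requests from
    // other threads are only accepted between enable() and disable().
    struct ApcTarget {
      std::atomic<bool> enabled{false};
      void enable()  { enabled = true;  }
      void disable() { enabled = false; }
      bool request_explain() { return enabled.load(); }  // requester side
    };

    static ApcTarget apc_target;

    static void exec_inner() { /* run the query plan */ }

    static void exec() {
      apc_target.enable();     // SHOW EXPLAIN can now be served
      exec_inner();
      apc_target.disable();    // requests are refused again
    }

    int main() {
      std::printf("before: %d\n", (int)apc_target.request_explain()); // refused
      exec();
      return 0;
    }
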
+
+
/**
Exec select.
@@ -2182,13 +2281,15 @@ JOIN::save_join_tab()
@todo
When can we have here thd->net.report_error not zero?
*/
-void
-JOIN::exec()
+
+void JOIN::exec_inner()
{
List<Item> *columns_list= &fields_list;
int tmp_error;
DBUG_ENTER("JOIN::exec");
+ const bool has_group_by= this->group;
+
thd_proc_info(thd, "executing");
error= 0;
if (procedure)
@@ -2487,6 +2588,10 @@ JOIN::exec()
DBUG_PRINT("info",("Creating group table"));
/* Free first data from old join */
+
+ /*
+ psergey-todo: this is the place of pre-mature JOIN::free call.
+ */
curr_join->join_free();
if (curr_join->make_simple_join(this, curr_tmp_table))
DBUG_VOID_RETURN;
@@ -2527,11 +2632,12 @@ JOIN::exec()
}
if (curr_join->group_list)
{
- thd_proc_info(thd, "Creating sort index");
if (curr_join->join_tab == join_tab && save_join_tab())
{
DBUG_VOID_RETURN;
}
+ DBUG_PRINT("info",("Sorting for index"));
+ thd_proc_info(thd, "Creating sort index");
if (create_sort_index(thd, curr_join, curr_join->group_list,
HA_POS_ERROR, HA_POS_ERROR, FALSE) ||
make_group_fields(this, curr_join))
@@ -2797,13 +2903,39 @@ JOIN::exec()
the query. XXX: it's never shown in EXPLAIN!
OPTION_FOUND_ROWS supersedes LIMIT and is taken into account.
*/
- if (create_sort_index(thd, curr_join,
- curr_join->group_list ?
- curr_join->group_list : curr_join->order,
- curr_join->select_limit,
- (select_options & OPTION_FOUND_ROWS ?
- HA_POS_ERROR : unit->select_limit_cnt),
- curr_join->group_list ? TRUE : FALSE))
+ DBUG_PRINT("info",("Sorting for order by/group by"));
+ ORDER *order_arg=
+ curr_join->group_list ? curr_join->group_list : curr_join->order;
+ /*
+    filesort_limit: Return only this many rows from filesort().
+    We can use select_limit_cnt only if we have no GROUP BY and one table.
+    This allows us to use Bounded_queue for queries like
+      "select SQL_CALC_FOUND_ROWS * from t1 order by b desc limit 1;"
+    where select_limit == HA_POS_ERROR (we need a full table scan) and
+    unit->select_limit_cnt == 1 (we only need one row in the result set).
+ */
+ const ha_rows filesort_limit_arg=
+ (has_group_by || curr_join->table_count > 1)
+ ? curr_join->select_limit : unit->select_limit_cnt;
+ const ha_rows select_limit_arg=
+ select_options & OPTION_FOUND_ROWS
+ ? HA_POS_ERROR : unit->select_limit_cnt;
+
+ DBUG_PRINT("info", ("has_group_by %d "
+ "curr_join->table_count %d "
+ "curr_join->m_select_limit %d "
+ "unit->select_limit_cnt %d",
+ has_group_by,
+ curr_join->table_count,
+ (int) curr_join->select_limit,
+ (int) unit->select_limit_cnt));
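A compact restatement of the limit choice above (a sketch, not the patch's code): the filesort limit may be lowered to the unit's limit only for a single-table, non-grouped query, which is what lets filesort use a bounded priority queue.

typedef unsigned long long ha_rows_t;            // stand-in for ha_rows
static const ha_rows_t HA_POS_ERROR_T= ~(ha_rows_t) 0;

static ha_rows_t pick_filesort_limit(bool has_group_by, unsigned table_count,
                                     ha_rows_t join_select_limit,
                                     ha_rows_t unit_select_limit_cnt)
{
  /* GROUP BY or a multi-table join: rows can't be cut off during the sort. */
  if (has_group_by || table_count > 1)
    return join_select_limit;          // often HA_POS_ERROR (sort everything)
  return unit_select_limit_cnt;        // enables the Bounded_queue path
}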
+
+ if (create_sort_index(thd,
+ curr_join,
+ order_arg,
+ filesort_limit_arg,
+ select_limit_arg,
+ curr_join->group_list ? FALSE : TRUE))
DBUG_VOID_RETURN;
sortorder= curr_join->sortorder;
if (curr_join->const_tables != curr_join->table_count &&
@@ -2835,6 +2967,14 @@ JOIN::exec()
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF);
error= do_select(curr_join, curr_fields_list, NULL, procedure);
thd->limit_found_rows= curr_join->send_records;
+ if (curr_join->order &&
+ curr_join->sortorder)
+ {
+ /* Use info provided by filesort. */
+ DBUG_ASSERT(curr_join->table_count > curr_join->const_tables);
+ JOIN_TAB *tab= curr_join->join_tab + curr_join->const_tables;
+ thd->limit_found_rows= tab->records;
+ }
/* Accumulate the counts from all join iterations of all join parts. */
thd->examined_row_count+= curr_join->examined_rows;
@@ -3183,6 +3323,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
table_vector[i]=s->table=table=tables->table;
table->pos_in_table_list= tables;
error= tables->fetch_number_of_rows();
+ set_statistics_for_table(join->thd, table);
#ifdef WITH_PARTITION_STORAGE_ENGINE
const bool no_partitions_used= table->no_partitions_used;
@@ -3214,8 +3355,8 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
s->dependent= tables->dep_tables;
if (tables->schema_table)
- table->file->stats.records= 2;
- table->quick_condition_rows= table->file->stats.records;
+ table->file->stats.records= table->used_stat_records= 2;
+ table->quick_condition_rows= table->stat_records();
s->on_expr_ref= &tables->on_expr;
if (*s->on_expr_ref)
@@ -3448,7 +3589,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
// All dep. must be constants
if (s->dependent & ~(found_const_table_map))
continue;
- if (table->file->stats.records <= 1L &&
+ if (table->stat_records() <= 1L &&
(table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
!table->pos_in_table_list->embedding &&
!((outer_join & table->map) &&
@@ -3724,7 +3865,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
goto error;
/* Generate an execution plan from the found optimal join order. */
- DBUG_RETURN(join->thd->killed || get_best_combination(join));
+ DBUG_RETURN(join->thd->check_killed() || get_best_combination(join));
error:
/*
@@ -4808,7 +4949,8 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
/* set a barrier for the array of SARGABLE_PARAM */
(*sargables)[0].field= 0;
- if (my_init_dynamic_array(keyuse,sizeof(KEYUSE),20,64))
+ if (my_init_dynamic_array(keyuse,sizeof(KEYUSE),20,64,
+ MYF(MY_THREAD_SPECIFIC)))
return TRUE;
if (cond)
@@ -5417,7 +5559,7 @@ best_access_path(JOIN *join,
else
{
uint key_parts= table->actual_n_key_parts(keyinfo);
- if (!(records=keyinfo->rec_per_key[key_parts-1]))
+ if (!(records= keyinfo->actual_rec_per_key(key_parts-1)))
{ /* Prefer longer keys */
records=
((double) s->records / (double) rec *
@@ -5517,7 +5659,7 @@ best_access_path(JOIN *join,
else
{
/* Check if we have statistic about the distribution */
- if ((records= keyinfo->rec_per_key[max_key_part-1]))
+ if ((records= keyinfo->actual_rec_per_key(max_key_part-1)))
{
/*
Fix for the case where the index statistics is too
@@ -6420,7 +6562,6 @@ greedy_search(JOIN *join,
JOIN_TAB *best_table; // the next plan node to be added to the curr QEP
// ==join->tables or # tables in the sj-mat nest we're optimizing
uint n_tables __attribute__((unused));
-
DBUG_ENTER("greedy_search");
/* number of tables that remain to be optimized */
@@ -6803,7 +6944,7 @@ best_extension_by_limited_search(JOIN *join,
DBUG_ENTER("best_extension_by_limited_search");
THD *thd= join->thd;
- if (thd->killed) // Abort
+ if (thd->check_killed()) // Abort
DBUG_RETURN(TRUE);
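The thd->killed to thd->check_killed() substitutions throughout this patch are what keep SHOW EXPLAIN responsive: the assumption (the body is not shown in this diff) is that check_killed() also services pending APC requests at each abort check. A stand-in model:

struct Apc_queue_model
{
  bool have_apc_requests() { return false; }   // stand-in
  void process_apc_requests() {}               // e.g. serve a SHOW EXPLAIN
};

struct Thd_model { Apc_queue_model apc_target; int killed= 0; };

static bool check_killed_model(Thd_model *thd)
{
  if (thd->apc_target.have_apc_requests())
    thd->apc_target.process_apc_requests();    // piggy-back on the kill poll
  return thd->killed != 0;
}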
DBUG_EXECUTE("opt", print_plan(join, idx, read_time, record_count, idx,
@@ -6960,7 +7101,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
{
DBUG_ENTER("find_best");
THD *thd= join->thd;
- if (thd->killed)
+ if (thd->check_killed())
DBUG_RETURN(TRUE);
if (!rest_tables)
{
@@ -7657,6 +7798,7 @@ get_best_combination(JOIN *join)
join->table_access_tabs= join->join_tab;
join->top_table_access_tabs_count= join->top_join_tab_count;
+
update_depend_map(join);
DBUG_RETURN(0);
}
@@ -7733,6 +7875,7 @@ static bool create_hj_key_for_table(JOIN *join, JOIN_TAB *join_tab,
keyinfo->key_length=0;
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
keyinfo->flags= HA_GENERATED_KEY;
+ keyinfo->is_statistics_from_stat_tables= FALSE;
keyinfo->name= (char *) "$hj";
keyinfo->rec_per_key= (ulong*) thd->calloc(sizeof(ulong)*key_parts);
if (!keyinfo->rec_per_key)
@@ -7900,6 +8043,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
j->ref.null_rejecting= 0;
j->ref.disable_cache= FALSE;
j->ref.null_ref_part= NO_REF_PART;
+ j->ref.const_ref_part_map= 0;
keyuse=org_keyuse;
store_key **ref_key= j->ref.key_copy;
@@ -7935,6 +8079,13 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
if (keyuse->null_rejecting)
j->ref.null_rejecting |= 1 << i;
keyuse_uses_no_tables= keyuse_uses_no_tables && !keyuse->used_tables;
+ /*
+        Todo: we should remove this check for thd->lex->describe on the next
+        line. With the SHOW EXPLAIN code, the EXPLAIN printout code no longer
+        depends on it. However, removing the check changed lots of query
+        plans! Does the optimizer depend on the contents of
+        table_ref->key_copy? If so, do we produce incorrect EXPLAINs?
+ */
if (!keyuse->val->used_tables() && !thd->lex->describe)
{ // Compare against constant
store_key_item tmp(thd,
@@ -7947,6 +8098,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
if (thd->is_fatal_error)
DBUG_RETURN(TRUE);
tmp.copy();
+ j->ref.const_ref_part_map |= key_part_map(1) << i ;
}
else
*ref_key++= get_store_key(thd,
@@ -8064,6 +8216,7 @@ JOIN::make_simple_join(JOIN *parent, TABLE *temp_table)
!(parent->join_tab_reexec= (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB))))
DBUG_RETURN(TRUE); /* purecov: inspected */
+ // psergey-todo: here, save the pointer for original join_tabs.
join_tab= parent->join_tab_reexec;
table= &parent->table_reexec[0]; parent->table_reexec[0]= temp_table;
table_count= top_join_tab_count= 1;
@@ -9435,7 +9588,7 @@ end_sj_materialize(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
if (item->is_null())
DBUG_RETURN(NESTED_LOOP_OK);
}
- fill_record(thd, table->field, sjm->sjm_table_cols, TRUE, FALSE);
+ fill_record(thd, table, table->field, sjm->sjm_table_cols, TRUE, FALSE);
if (thd->is_error())
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
if ((error= table->file->ha_write_tmp_row(table->record[0])))
@@ -10300,7 +10453,7 @@ double JOIN_TAB::scan_time()
}
else
{
- found_records= records= table->file->stats.records;
+ found_records= records= table->stat_records();
read_time= table->file->scan_time();
/*
table->quick_condition_rows has already been set to
@@ -10311,7 +10464,7 @@ double JOIN_TAB::scan_time()
}
else
{
- found_records= records=table->file->stats.records;
+ found_records= records=table->stat_records();
read_time= found_records ? (double)found_records: 10.0;// TODO:fix this stub
res= read_time;
}
@@ -10329,7 +10482,7 @@ ha_rows JOIN_TAB::get_examined_rows()
{
ha_rows examined_rows;
- if (select && select->quick)
+ if (select && select->quick && use_quick != 2)
examined_rows= select->quick->records;
else if (type == JT_NEXT || type == JT_ALL ||
type == JT_HASH || type ==JT_HASH_NEXT)
@@ -10352,7 +10505,7 @@ ha_rows JOIN_TAB::get_examined_rows()
handler->info(HA_STATUS_VARIABLE) has been called in
make_join_statistics()
*/
- examined_rows= table->file->stats.records;
+ examined_rows= table->stat_records();
}
}
}
@@ -10383,9 +10536,18 @@ bool JOIN_TAB::preread_init()
mysql_handle_single_derived(join->thd->lex,
derived, DT_CREATE | DT_FILL))
return TRUE;
+
preread_init_done= TRUE;
if (select && select->quick)
select->quick->replace_handler(table->file);
+
+ DBUG_EXECUTE_IF("show_explain_probe_join_tab_preread",
+ if (dbug_user_var_equals_int(join->thd,
+ "show_explain_probe_select_id",
+ join->select_lex->select_number))
+ dbug_serve_apcs(join->thd, 1);
+ );
+
return FALSE;
}
@@ -10593,6 +10755,8 @@ void JOIN::cleanup(bool full)
{
DBUG_ENTER("JOIN::cleanup");
DBUG_PRINT("enter", ("full %u", (uint) full));
+
+ have_query_plan= QEP_DELETED;
if (table)
{
@@ -11099,14 +11263,6 @@ public:
COND_CMP(Item *a,Item_func *b) :and_level(a),cmp_func(b) {}
};
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-template class I_List<COND_CMP>;
-template class I_List_iterator<COND_CMP>;
-template class List<Item_func_match>;
-template class List_iterator<Item_func_match>;
-#endif
-
-
/**
Find the multiple equality predicate containing a field.
@@ -14219,7 +14375,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
if (param->precomputed_group_by)
copy_func_count+= param->sum_func_count;
- init_sql_alloc(&own_root, TABLE_ALLOC_BLOCK_SIZE, 0);
+ init_sql_alloc(&own_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(MY_THREAD_SPECIFIC));
if (!multi_alloc_root(&own_root,
&table, sizeof(*table),
@@ -14715,8 +14871,11 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
keyinfo->usable_key_parts=keyinfo->key_parts= param->group_parts;
keyinfo->ext_key_parts= keyinfo->key_parts;
keyinfo->key_length=0;
- keyinfo->rec_per_key=0;
+ keyinfo->rec_per_key=NULL;
+ keyinfo->read_stats= NULL;
+ keyinfo->collected_stats= NULL;
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
+ keyinfo->is_statistics_from_stat_tables= FALSE;
keyinfo->name= (char*) "group_key";
ORDER *cur_group= group;
for (; cur_group ; cur_group= cur_group->next, key_part_info++)
@@ -14829,7 +14988,10 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
keyinfo->key_length= 0; // Will compute the sum of the parts below.
keyinfo->name= (char*) "distinct_key";
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
+ keyinfo->is_statistics_from_stat_tables= FALSE;
keyinfo->rec_per_key=0;
+ keyinfo->read_stats= NULL;
+ keyinfo->collected_stats= NULL;
/*
Create an extra field to hold NULL bits so that unique indexes on
@@ -15532,7 +15694,7 @@ create_internal_tmp_table_from_heap2(THD *thd, TABLE *table,
DBUG_EXECUTE_IF("raise_error", write_err= HA_ERR_FOUND_DUPP_KEY ;);
if (write_err)
goto err;
- if (thd->killed)
+ if (thd->check_killed())
{
thd->send_kill_message();
goto err_killed;
@@ -15791,6 +15953,14 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
else
{
DBUG_ASSERT(join->table_count);
+
+ DBUG_EXECUTE_IF("show_explain_probe_do_select",
+ if (dbug_user_var_equals_int(join->thd,
+ "show_explain_probe_select_id",
+ join->select_lex->select_number))
+ dbug_serve_apcs(join->thd, 1);
+ );
+
if (join->outer_ref_cond && !join->outer_ref_cond->val_int())
error= NESTED_LOOP_NO_MORE_ROWS;
else
@@ -15921,7 +16091,7 @@ sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
rc= sub_select(join, join_tab, end_of_records);
DBUG_RETURN(rc);
}
- if (join->thd->killed)
+ if (join->thd->check_killed())
{
/* The user has aborted the execution of the query */
join->thd->send_kill_message();
@@ -16208,7 +16378,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
DBUG_RETURN(NESTED_LOOP_ERROR);
if (error < 0)
DBUG_RETURN(NESTED_LOOP_NO_MORE_ROWS);
- if (join->thd->killed) // Aborted by user
+ if (join->thd->check_killed()) // Aborted by user
{
join->thd->send_kill_message();
DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */
@@ -16975,6 +17145,14 @@ int read_first_record_seq(JOIN_TAB *tab)
static int
test_if_quick_select(JOIN_TAB *tab)
{
+ DBUG_EXECUTE_IF("show_explain_probe_test_if_quick_select",
+ if (dbug_user_var_equals_int(tab->join->thd,
+ "show_explain_probe_select_id",
+ tab->join->select_lex->select_number))
+ dbug_serve_apcs(tab->join->thd, 1);
+ );
+
+
delete tab->select->quick;
tab->select->quick=0;
return tab->select->test_quick_select(tab->join->thd, tab->keys,
@@ -17215,7 +17393,25 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if ((error= join->result->send_data(*join->fields)))
DBUG_RETURN(error < 0 ? NESTED_LOOP_OK : NESTED_LOOP_ERROR);
}
- if (++join->send_records >= join->unit->select_limit_cnt &&
+
+ ++join->send_records;
+ if (join->send_records >= join->unit->select_limit_cnt &&
+ !join->do_send_rows)
+ {
+ /*
+      If filesort is used for sorting, stop after select_limit_cnt+1
+      records have been read: because of an optimization, in some cases
+      filesort can provide only select_limit_cnt+1 records.
+ */
+ if (join->order &&
+ join->sortorder &&
+ join->select_options & OPTION_FOUND_ROWS)
+ {
+ DBUG_PRINT("info", ("filesort NESTED_LOOP_QUERY_LIMIT"));
+ DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT);
+ }
+ }
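The new branch above can be restated as a predicate (a sketch; flag names shortened): once the limit is reached and rows are no longer being sent, a filesorted SQL_CALC_FOUND_ROWS query may stop early, because filesort has already computed the total row count.

static bool stop_after_limit(unsigned long long sent_records,
                             unsigned long long select_limit_cnt,
                             bool do_send_rows, bool used_filesort,
                             bool option_found_rows)
{
  return sent_records >= select_limit_cnt && !do_send_rows &&
         used_filesort && option_found_rows;   // -> NESTED_LOOP_QUERY_LIMIT
}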
+ if (join->send_records >= join->unit->select_limit_cnt &&
join->do_send_rows)
{
if (join->select_options & OPTION_FOUND_ROWS)
@@ -17436,7 +17632,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
}
}
end:
- if (join->thd->killed)
+ if (join->thd->check_killed())
{
join->thd->send_kill_message();
DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */
@@ -17520,7 +17716,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
}
join->send_records++;
end:
- if (join->thd->killed)
+ if (join->thd->check_killed())
{
join->thd->send_kill_message();
DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */
@@ -17570,7 +17766,7 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
}
}
- if (join->thd->killed)
+ if (join->thd->check_killed())
{
join->thd->send_kill_message();
DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */
@@ -17648,7 +17844,7 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (join->procedure)
join->procedure->add();
end:
- if (join->thd->killed)
+ if (join->thd->check_killed())
{
join->thd->send_kill_message();
DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */
@@ -18653,7 +18849,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
uint saved_best_key_parts= 0;
int best_key_direction= 0;
JOIN *join= tab->join;
- ha_rows table_records= table->file->stats.records;
+ ha_rows table_records= table->stat_records();
test_if_cheaper_ordering(tab, order, table, usable_keys,
ref_key, select_limit,
@@ -18768,7 +18964,7 @@ check_reverse_order:
{
tab->ref.key= -1;
tab->ref.key_parts= 0;
- if (select_limit < table->file->stats.records)
+ if (select_limit < table->stat_records())
tab->limit= select_limit;
table->disable_keyread();
}
@@ -18923,6 +19119,8 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
{
uint length= 0;
ha_rows examined_rows;
+ ha_rows found_rows;
+ ha_rows filesort_retval= HA_POS_ERROR;
TABLE *table;
SQL_SELECT *select;
JOIN_TAB *tab;
@@ -19011,7 +19209,8 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
goto err; /* purecov: inspected */
table->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE),
- MYF(MY_WME | MY_ZEROFILL));
+ MYF(MY_WME | MY_ZEROFILL|
+ MY_THREAD_SPECIFIC));
table->status=0; // May be wrong if quick_select
// If table has a range, move it to select
@@ -19054,9 +19253,11 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
goto err;
if (table->s->tmp_table)
table->file->info(HA_STATUS_VARIABLE); // Get record count
- table->sort.found_records=filesort(thd, table,join->sortorder, length,
- select, filesort_limit, 0,
- &examined_rows);
+ filesort_retval= filesort(thd, table, join->sortorder, length,
+ select, filesort_limit, 0,
+ &examined_rows, &found_rows);
+ table->sort.found_records= filesort_retval;
+ tab->records= found_rows; // For SQL_CALC_ROWS
if (quick_created)
{
@@ -19074,50 +19275,7 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
*(join->pre_sort_join_tab)= *tab;
- /*TODO: here, close the index scan, cancel index-only read. */
- tab->records= table->sort.found_records; // For SQL_CALC_ROWS
-#if 0
- /* MariaDB doesn't need the following: */
- if (select)
- {
- /*
- We need to preserve tablesort's output resultset here, because
- QUICK_INDEX_MERGE_SELECT::~QUICK_INDEX_MERGE_SELECT (called by
- SQL_SELECT::cleanup()) may free it assuming it's the result of the quick
- select operation that we no longer need. Note that all the other parts of
- this data structure are cleaned up when
- QUICK_INDEX_MERGE_SELECT::get_next encounters end of data, so the next
- SQL_SELECT::cleanup() call changes sort.io_cache alone.
- */
- IO_CACHE *tablesort_result_cache;
-
- tablesort_result_cache= table->sort.io_cache;
- table->sort.io_cache= NULL;
- // select->cleanup(); // filesort did select
- /*
- If a quick object was created outside of create_sort_index()
- that might be reused, then do not call select->cleanup() since
- it will delete the quick object.
- */
- if (!keep_quick)
- {
- select->cleanup();
- /*
- The select object should now be ready for the next use. If it
- is re-used then there exists a backup copy of this join tab
- which has the pointer to it. The join tab will be restored in
- JOIN::reset(). So here we just delete the pointer to it.
- */
- tab->select= NULL;
- // If we deleted the quick select object we need to clear quick_keys
- table->quick_keys.clear_all();
- table->intersect_keys.clear_all();
- }
- // Restore the output resultset
- table->sort.io_cache= tablesort_result_cache;
- }
-#endif
tab->select=NULL;
tab->set_select_cond(NULL, __LINE__);
tab->type=JT_ALL; // Read with normal read_record
@@ -19128,12 +19286,11 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
goto err;
tab->join->examined_rows+=examined_rows;
- DBUG_RETURN(table->sort.found_records == HA_POS_ERROR);
+ DBUG_RETURN(filesort_retval == HA_POS_ERROR);
err:
DBUG_RETURN(-1);
}
-
void JOIN::clean_pre_sort_join_tab()
{
//TABLE *table= pre_sort_join_tab->table;
@@ -19142,29 +19299,6 @@ void JOIN::clean_pre_sort_join_tab()
the table already deleted by st_select_lex_unit::cleanup().
We rely on that fake_select_lex didn't have quick select.
*/
-#if 0
- if (pre_sort_join_tab->select && pre_sort_join_tab->select->quick)
- {
- /*
- We need to preserve tablesort's output resultset here, because
- QUICK_INDEX_MERGE_SELECT::~QUICK_INDEX_MERGE_SELECT (called by
- SQL_SELECT::cleanup()) may free it assuming it's the result of the quick
- select operation that we no longer need. Note that all the other parts of
- this data structure are cleaned up when
- QUICK_INDEX_MERGE_SELECT::get_next encounters end of data, so the next
- SQL_SELECT::cleanup() call changes sort.io_cache alone.
- */
- IO_CACHE *tablesort_result_cache;
-
- tablesort_result_cache= table->sort.io_cache;
- table->sort.io_cache= NULL;
- pre_sort_join_tab->select->cleanup();
- table->quick_keys.clear_all(); // as far as we cleanup select->quick
- table->intersect_keys.clear_all();
- table->sort.io_cache= tablesort_result_cache;
- }
-#endif
- //table->disable_keyread(); // Restore if we used indexes
if (pre_sort_join_tab->select && pre_sort_join_tab->select->quick)
{
pre_sort_join_tab->select->cleanup();
@@ -19288,7 +19422,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
error= file->ha_rnd_next(record);
for (;;)
{
- if (thd->killed)
+ if (thd->check_killed())
{
thd->send_kill_message();
error=0;
@@ -19408,7 +19542,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
for (;;)
{
uchar *org_key_pos;
- if (thd->killed)
+ if (thd->check_killed())
{
thd->send_kill_message();
error=0;
@@ -19484,7 +19618,7 @@ SORT_FIELD *make_unireg_sortorder(ORDER *order, uint *length,
pos= sort= sortorder;
if (!pos)
- return 0;
+ DBUG_RETURN(0);
for (;order;order=order->next,pos++)
{
@@ -19501,6 +19635,7 @@ SORT_FIELD *make_unireg_sortorder(ORDER *order, uint *length,
else
pos->item= *order->item;
pos->reverse=! order->asc;
+ DBUG_ASSERT(pos->field != NULL || pos->item != NULL);
}
*length=count;
DBUG_RETURN(sort);
@@ -21455,29 +21590,155 @@ void JOIN::clear()
}
}
+
+/*
+  Print an EXPLAIN line with all NULLs and the given message in the 'Extra'
+  column.
+*/
+int print_explain_message_line(select_result_sink *result,
+ SELECT_LEX *select_lex,
+ bool on_the_fly,
+ uint8 options,
+ const char *message)
+{
+ const CHARSET_INFO *cs= system_charset_info;
+ Item *item_null= new Item_null();
+ List<Item> item_list;
+
+ if (on_the_fly)
+ select_lex->set_explain_type(on_the_fly);
+
+ item_list.push_back(new Item_int((int32)
+ select_lex->select_number));
+ item_list.push_back(new Item_string(select_lex->type,
+ strlen(select_lex->type), cs));
+ for (uint i=0 ; i < 7; i++)
+ item_list.push_back(item_null);
+ if (options & DESCRIBE_PARTITIONS)
+ item_list.push_back(item_null);
+ if (options & DESCRIBE_EXTENDED)
+ item_list.push_back(item_null);
+
+ item_list.push_back(new Item_string(message,strlen(message),cs));
+
+ if (result->send_data(item_list))
+ return 1;
+ return 0;
+}
+
+
+int print_fake_select_lex_join(select_result_sink *result, bool on_the_fly,
+ SELECT_LEX *select_lex, uint8 explain_flags)
+{
+ const CHARSET_INFO *cs= system_charset_info;
+ Item *item_null= new Item_null();
+ List<Item> item_list;
+ if (on_the_fly)
+ select_lex->set_explain_type(on_the_fly);
+ /*
+    Here we assume that the query will return at least two rows, so we
+    show "filesort" in EXPLAIN. Of course, sometimes we'll be wrong
+    and no filesort will actually be done, but executing all selects in
+    the UNION to provide precise EXPLAIN information will hardly be
+    appreciated :)
+ */
+ char table_name_buffer[SAFE_NAME_LEN];
+ item_list.empty();
+ /* id */
+ item_list.push_back(new Item_null);
+ /* select_type */
+ item_list.push_back(new Item_string(select_lex->type,
+ strlen(select_lex->type),
+ cs));
+ /* table */
+ {
+ SELECT_LEX *sl= select_lex->master_unit()->first_select();
+ uint len= 6, lastop= 0;
+ memcpy(table_name_buffer, STRING_WITH_LEN("<union"));
+ for (; sl && len + lastop + 5 < NAME_LEN; sl= sl->next_select())
+ {
+ len+= lastop;
+ lastop= my_snprintf(table_name_buffer + len, NAME_LEN - len,
+ "%u,", sl->select_number);
+ }
+ if (sl || len + lastop >= NAME_LEN)
+ {
+ memcpy(table_name_buffer + len, STRING_WITH_LEN("...>") + 1);
+ len+= 4;
+ }
+ else
+ {
+ len+= lastop;
+ table_name_buffer[len - 1]= '>'; // change ',' to '>'
+ }
+ item_list.push_back(new Item_string(table_name_buffer, len, cs));
+ }
+ /* partitions */
+ if (explain_flags & DESCRIBE_PARTITIONS)
+ item_list.push_back(item_null);
+ /* type */
+ item_list.push_back(new Item_string(join_type_str[JT_ALL],
+ strlen(join_type_str[JT_ALL]),
+ cs));
+ /* possible_keys */
+ item_list.push_back(item_null);
+ /* key*/
+ item_list.push_back(item_null);
+ /* key_len */
+ item_list.push_back(item_null);
+ /* ref */
+ item_list.push_back(item_null);
+ /* in_rows */
+ if (explain_flags & DESCRIBE_EXTENDED)
+ item_list.push_back(item_null);
+ /* rows */
+ item_list.push_back(item_null);
+ /* extra */
+ if (select_lex->master_unit()->global_parameters->order_list.first)
+ item_list.push_back(new Item_string("Using filesort",
+ 14, cs));
+ else
+ item_list.push_back(new Item_string("", 0, cs));
+
+ if (result->send_data(item_list))
+ return 1;
+ return 0;
+}
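The "<union1,2>" name construction above is easy to get wrong around truncation. A standalone sketch of the same algorithm (snprintf in place of my_snprintf; buf_len must be at least 11, and sizes are otherwise illustrative):

#include <cstdio>
#include <cstring>

static int make_union_table_name(char *buf, size_t buf_len,
                                 const unsigned *select_nos, size_t count)
{
  size_t len= 6, lastop= 0, i= 0;
  memcpy(buf, "<union", 6);
  for (; i < count && len + lastop + 5 < buf_len; i++)
  {
    len+= lastop;
    lastop= (size_t) snprintf(buf + len, buf_len - len, "%u,", select_nos[i]);
  }
  if (i < count || len + lastop >= buf_len)
  {
    memcpy(buf + len, "...>", 5);      // truncated list: "<union1,2,...>"
    len+= 4;
  }
  else
  {
    len+= lastop;
    buf[len - 1]= '>';                 // change trailing ',' to '>'
  }
  return (int) len;
}
// e.g. nos[]={1,2} yields "<union1,2>"; an overlong list yields "<union1,2,...>"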
+
+
/**
EXPLAIN handling.
- Send a description about what how the select will be done to stdout.
+  Produce lines explaining the execution of *this* select (not including
+  child selects).
+  @param on_the_fly  TRUE <=> we're being executed on-the-fly, so don't make
+                     modifications to any select's data structures
*/
-static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
- bool distinct,const char *message)
+int JOIN::print_explain(select_result_sink *result, uint8 explain_flags,
+ bool on_the_fly,
+ bool need_tmp_table, bool need_order,
+ bool distinct, const char *message)
{
List<Item> field_list;
List<Item> item_list;
+ JOIN *join= this; /* Legacy: this code used to be a non-member function */
THD *thd=join->thd;
- select_result *result=join->result;
Item *item_null= new Item_null();
CHARSET_INFO *cs= system_charset_info;
int quick_type;
- DBUG_ENTER("select_describe");
+ int error= 0;
+ DBUG_ENTER("JOIN::print_explain");
DBUG_PRINT("info", ("Select 0x%lx, type %s, message %s",
(ulong)join->select_lex, join->select_lex->type,
message ? message : "NULL"));
+  DBUG_ASSERT(on_the_fly ? have_query_plan == QEP_AVAILABLE : TRUE);
/* Don't log this into the slow query log */
- thd->server_status&= ~(SERVER_QUERY_NO_INDEX_USED | SERVER_QUERY_NO_GOOD_INDEX_USED);
- join->unit->offset_limit_cnt= 0;
+
+ if (!on_the_fly)
+ {
+ thd->server_status&= ~(SERVER_QUERY_NO_INDEX_USED | SERVER_QUERY_NO_GOOD_INDEX_USED);
+ join->unit->offset_limit_cnt= 0;
+ }
/*
NOTE: the number/types of items pushed into item_list must be in sync with
@@ -21485,98 +21746,29 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
*/
if (message)
{
- item_list.push_back(new Item_int((int32)
- join->select_lex->select_number));
- item_list.push_back(new Item_string(join->select_lex->type,
- strlen(join->select_lex->type), cs));
- for (uint i=0 ; i < 7; i++)
- item_list.push_back(item_null);
- if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
- item_list.push_back(item_null);
- if (join->thd->lex->describe & DESCRIBE_EXTENDED)
- item_list.push_back(item_null);
-
- item_list.push_back(new Item_string(message,strlen(message),cs));
- if (result->send_data(item_list))
- join->error= 1;
+ if (print_explain_message_line(result, join->select_lex, on_the_fly,
+ explain_flags, message))
+ error= 1;
+
}
else if (join->select_lex == join->unit->fake_select_lex)
{
- /*
- here we assume that the query will return at least two rows, so we
- show "filesort" in EXPLAIN. Of course, sometimes we'll be wrong
- and no filesort will be actually done, but executing all selects in
- the UNION to provide precise EXPLAIN information will hardly be
- appreciated :)
- */
- char table_name_buffer[SAFE_NAME_LEN];
- item_list.empty();
- /* id */
- item_list.push_back(new Item_null);
- /* select_type */
- item_list.push_back(new Item_string(join->select_lex->type,
- strlen(join->select_lex->type),
- cs));
- /* table */
- {
- SELECT_LEX *sl= join->unit->first_select();
- uint len= 6, lastop= 0;
- memcpy(table_name_buffer, STRING_WITH_LEN("<union"));
- for (; sl && len + lastop + 5 < NAME_LEN; sl= sl->next_select())
- {
- len+= lastop;
- lastop= my_snprintf(table_name_buffer + len, NAME_LEN - len,
- "%u,", sl->select_number);
- }
- if (sl || len + lastop >= NAME_LEN)
- {
- memcpy(table_name_buffer + len, STRING_WITH_LEN("...>") + 1);
- len+= 4;
- }
- else
- {
- len+= lastop;
- table_name_buffer[len - 1]= '>'; // change ',' to '>'
- }
- item_list.push_back(new Item_string(table_name_buffer, len, cs));
- }
- /* partitions */
- if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
- item_list.push_back(item_null);
- /* type */
- item_list.push_back(new Item_string(join_type_str[JT_ALL],
- strlen(join_type_str[JT_ALL]),
- cs));
- /* possible_keys */
- item_list.push_back(item_null);
- /* key*/
- item_list.push_back(item_null);
- /* key_len */
- item_list.push_back(item_null);
- /* ref */
- item_list.push_back(item_null);
- /* in_rows */
- if (join->thd->lex->describe & DESCRIBE_EXTENDED)
- item_list.push_back(item_null);
- /* rows */
- item_list.push_back(item_null);
- /* extra */
- if (join->unit->global_parameters->order_list.first)
- item_list.push_back(new Item_string("Using filesort",
- 14, cs));
- else
- item_list.push_back(new Item_string("", 0, cs));
-
- if (result->send_data(item_list))
- join->error= 1;
+ if (print_fake_select_lex_join(result, on_the_fly,
+ join->select_lex,
+ explain_flags))
+ error= 1;
}
else if (!join->select_lex->master_unit()->derived ||
join->select_lex->master_unit()->derived->is_materialized_derived())
{
table_map used_tables=0;
+ //if (!join->select_lex->type)
+ if (on_the_fly)
+ join->select_lex->set_explain_type(on_the_fly);
bool printing_materialize_nest= FALSE;
uint select_id= join->select_lex->select_number;
+ JOIN_TAB* const first_top_tab= first_breadth_first_tab(join, WALK_OPTIMIZATION_TABS);
for (JOIN_TAB *tab= first_breadth_first_tab(join, WALK_OPTIMIZATION_TABS); tab;
tab= next_breadth_first_tab(join, WALK_OPTIMIZATION_TABS, tab))
@@ -21611,6 +21803,8 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
tmp3.length(0);
tmp4.length(0);
quick_type= -1;
+ QUICK_SELECT_I *quick= NULL;
+ JOIN_TAB *saved_join_tab= NULL;
/* Don't show eliminated tables */
if (table->map & join->eliminated_tables)
@@ -21619,6 +21813,13 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
continue;
}
+ if (join->table_access_tabs == join->join_tab &&
+ tab == (first_top_tab + join->const_tables) && pre_sort_join_tab)
+ {
+ saved_join_tab= tab;
+ tab= pre_sort_join_tab;
+ }
+
item_list.empty();
/* id */
item_list.push_back(new Item_uint((uint32)select_id));
@@ -21627,17 +21828,19 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
join->select_lex->type;
item_list.push_back(new Item_string(stype, strlen(stype), cs));
+ enum join_type tab_type= tab->type;
if ((tab->type == JT_ALL || tab->type == JT_HASH) &&
- tab->select && tab->select->quick)
+ tab->select && tab->select->quick && tab->use_quick != 2)
{
+ quick= tab->select->quick;
quick_type= tab->select->quick->get_type();
if ((quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE) ||
(quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_INTERSECT) ||
(quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT) ||
(quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION))
- tab->type= tab->type == JT_ALL ? JT_INDEX_MERGE : JT_HASH_INDEX_MERGE;
+ tab_type= tab->type == JT_ALL ? JT_INDEX_MERGE : JT_HASH_INDEX_MERGE;
else
- tab->type= tab->type == JT_ALL ? JT_RANGE : JT_HASH_RANGE;
+ tab_type= tab->type == JT_ALL ? JT_RANGE : JT_HASH_RANGE;
}
/* table */
@@ -21666,7 +21869,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
strlen(real_table->alias), cs));
}
/* "partitions" column */
- if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
+ if (explain_flags & DESCRIBE_PARTITIONS)
{
#ifdef WITH_PARTITION_STORAGE_ENGINE
partition_info *part_info;
@@ -21685,8 +21888,8 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
#endif
}
/* "type" column */
- item_list.push_back(new Item_string(join_type_str[tab->type],
- strlen(join_type_str[tab->type]),
+ item_list.push_back(new Item_string(join_type_str[tab_type],
+ strlen(join_type_str[tab_type]),
cs));
/* Build "possible_keys" value and add it to item_list */
if (!tab->keys.is_clear_all())
@@ -21710,7 +21913,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
item_list.push_back(item_null);
/* Build "key", "key_len", and "ref" values and add them to item_list */
- if (tab->type == JT_NEXT)
+ if (tab_type == JT_NEXT)
{
key_info= table->key_info+tab->index;
key_len= key_info->key_length;
@@ -21729,22 +21932,30 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
length= (longlong10_to_str(key_len, keylen_str_buf, 10) -
keylen_str_buf);
tmp3.append(keylen_str_buf, length, cs);
- if (tab->ref.key_parts)
+ if (tab->ref.key_parts && tab_type != JT_FT)
{
- for (store_key **ref=tab->ref.key_copy ; *ref ; ref++)
+ store_key **ref=tab->ref.key_copy;
+ for (uint kp= 0; kp < tab->ref.key_parts; kp++)
{
if (tmp4.length())
tmp4.append(',');
- tmp4.append((*ref)->name(), strlen((*ref)->name()), cs);
+
+ if ((key_part_map(1) << kp) & tab->ref.const_ref_part_map)
+ tmp4.append("const");
+ else
+ {
+ tmp4.append((*ref)->name(), strlen((*ref)->name()), cs);
+ ref++;
+ }
}
}
}
- if (is_hj && tab->type != JT_HASH)
+ if (is_hj && tab_type != JT_HASH)
{
tmp2.append(':');
tmp3.append(':');
}
- if (tab->type == JT_HASH_NEXT)
+ if (tab_type == JT_HASH_NEXT)
{
register uint length;
key_info= table->key_info+tab->index;
@@ -21754,9 +21965,9 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
keylen_str_buf);
tmp3.append(keylen_str_buf, length, cs);
}
- if (tab->type != JT_CONST && tab->select && tab->select->quick)
+ if (tab->type != JT_CONST && tab->select && quick)
tab->select->quick->add_keys_and_lengths(&tmp2, &tmp3);
- if (key_info || (tab->select && tab->select->quick))
+ if (key_info || (tab->select && quick))
{
if (tmp2.length())
item_list.push_back(new Item_string(tmp2.ptr(),tmp2.length(),cs));
@@ -21766,7 +21977,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
item_list.push_back(new Item_string(tmp3.ptr(),tmp3.length(),cs));
else
item_list.push_back(item_null);
- if (key_info && tab->type != JT_NEXT)
+ if (key_info && tab_type != JT_NEXT)
item_list.push_back(new Item_string(tmp4.ptr(),tmp4.length(),cs));
else
item_list.push_back(item_null);
@@ -21809,7 +22020,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
table_list->schema_table)
{
/* in_rows */
- if (join->thd->lex->describe & DESCRIBE_EXTENDED)
+ if (explain_flags & DESCRIBE_EXTENDED)
item_list.push_back(item_null);
/* rows */
item_list.push_back(item_null);
@@ -21822,7 +22033,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
MY_INT64_NUM_DECIMAL_DIGITS));
/* Add "filtered" field to item_list. */
- if (join->thd->lex->describe & DESCRIBE_EXTENDED)
+ if (explain_flags & DESCRIBE_EXTENDED)
{
float f= 0.0;
if (examined_rows)
@@ -21834,11 +22045,11 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
/* Build "Extra" field and add it to item_list. */
key_read=table->key_read;
- if ((tab->type == JT_NEXT || tab->type == JT_CONST) &&
+ if ((tab_type == JT_NEXT || tab_type == JT_CONST) &&
table->covering_keys.is_set(tab->index))
key_read=1;
if (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT &&
- !((QUICK_ROR_INTERSECT_SELECT*)tab->select->quick)->need_to_fetch_row)
+ !((QUICK_ROR_INTERSECT_SELECT*)quick)->need_to_fetch_row)
key_read=1;
if (tab->info)
@@ -21866,8 +22077,8 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
uint keyno= MAX_KEY;
if (tab->ref.key_parts)
keyno= tab->ref.key;
- else if (tab->select && tab->select->quick)
- keyno = tab->select->quick->index;
+ else if (tab->select && quick)
+ keyno = quick->index;
if (keyno != MAX_KEY && keyno == table->file->pushed_idx_cond_keyno &&
table->file->pushed_idx_cond)
@@ -21906,7 +22117,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
{
extra.append(STRING_WITH_LEN("; Using where with pushed "
"condition"));
- if (thd->lex->describe & DESCRIBE_EXTENDED)
+ if (explain_flags & DESCRIBE_EXTENDED)
{
extra.append(STRING_WITH_LEN(": "));
((COND *)pushed_cond)->print(&extra, QT_ORDINARY);
@@ -21999,7 +22210,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
extra.append(STRING_WITH_LEN("; End temporary"));
else if (tab->do_firstmatch)
{
- if (tab->do_firstmatch == join->join_tab - 1)
+ if (tab->do_firstmatch == /*join->join_tab*/ first_top_tab - 1)
extra.append(STRING_WITH_LEN("; FirstMatch"));
else
{
@@ -22046,12 +22257,34 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
item_list.push_back(new Item_string(str, len, cs));
}
+ if (saved_join_tab)
+ tab= saved_join_tab;
+
// For next iteration
used_tables|=table->map;
if (result->send_data(item_list))
- join->error= 1;
+ error= 1;
}
}
+ DBUG_RETURN(error);
+}
+
+
+/*
+ See st_select_lex::print_explain() for the SHOW EXPLAIN counterpart
+*/
+
+static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
+ bool distinct,const char *message)
+{
+ THD *thd=join->thd;
+ select_result *result=join->result;
+ DBUG_ENTER("select_describe");
+ join->error= join->print_explain(result, thd->lex->describe,
+ FALSE, /* Not on-the-fly */
+ need_tmp_table, need_order, distinct,
+ message);
+
for (SELECT_LEX_UNIT *unit= join->select_lex->first_inner_unit();
unit;
unit= unit->next_unit())
@@ -22094,7 +22327,7 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result)
for (SELECT_LEX *sl= first; sl; sl= sl->next_select())
{
- sl->set_explain_type();
+ sl->set_explain_type(FALSE);
sl->options|= SELECT_DESCRIBE;
}
@@ -22210,6 +22443,7 @@ static void print_join(THD *thd,
List_iterator_fast<TABLE_LIST> ti(*tables);
TABLE_LIST **table;
uint non_const_tables= 0;
+ DBUG_ENTER("print_join");
for (TABLE_LIST *t= ti++; t ; t= ti++)
{
@@ -22223,13 +22457,13 @@ static void print_join(THD *thd,
if (!non_const_tables)
{
str->append(STRING_WITH_LEN("dual"));
- return; // all tables were optimized away
+ DBUG_VOID_RETURN; // all tables were optimized away
}
ti.rewind();
if (!(table= (TABLE_LIST **)thd->alloc(sizeof(TABLE_LIST*) *
non_const_tables)))
- return; // out of memory
+ DBUG_VOID_RETURN; // out of memory
TABLE_LIST *tmp, **t= table + (non_const_tables - 1);
while ((tmp= ti++))
@@ -22268,6 +22502,7 @@ static void print_join(THD *thd,
}
print_table_array(thd, eliminated_tables, str, table,
table + non_const_tables, query_type);
+ DBUG_VOID_RETURN;
}
/**
@@ -22776,7 +23011,8 @@ JOIN::reoptimize(Item *added_where, table_map join_tables,
reset_query_plan();
if (!keyuse.buffer &&
- my_init_dynamic_array(&keyuse, sizeof(KEYUSE), 20, 64))
+ my_init_dynamic_array(&keyuse, sizeof(KEYUSE), 20, 64,
+ MYF(MY_THREAD_SPECIFIC)))
{
delete_dynamic(&added_keyuse);
return REOPT_ERROR;
@@ -22909,7 +23145,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
int best_key= -1;
bool is_best_covering= FALSE;
double fanout= 1;
- ha_rows table_records= table->file->stats.records;
+ ha_rows table_records= table->stat_records();
bool group= join && join->group && order == join->group_list;
ha_rows ref_key_quick_rows= HA_POS_ERROR;
const bool has_limit= (select_limit_arg != HA_POS_ERROR);
@@ -23001,7 +23237,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
if (used_key_parts > used_index_parts)
used_pk_parts= used_key_parts-used_index_parts;
rec_per_key= used_key_parts ?
- keyinfo->rec_per_key[used_key_parts-1] : 1;
+ keyinfo->actual_rec_per_key(used_key_parts-1) : 1;
/* Take into account the selectivity of the used pk prefix */
if (used_pk_parts)
{
@@ -23016,14 +23252,14 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
rec_per_key= 1;
if (rec_per_key > 1)
{
- rec_per_key*= pkinfo->rec_per_key[used_pk_parts-1];
- rec_per_key/= pkinfo->rec_per_key[0];
+ rec_per_key*= pkinfo->actual_rec_per_key(used_pk_parts-1);
+ rec_per_key/= pkinfo->actual_rec_per_key(0);
/*
The value of rec_per_key for the extended key has
to be adjusted accordingly if some components of
the secondary key are included in the primary key.
*/
- for(uint i= 0; i < used_pk_parts; i++)
+ for(uint i= 1; i < used_pk_parts; i++)
{
if (pkinfo->key_part[i].field->key_start.is_set(nr))
{
@@ -23031,10 +23267,9 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
We presume here that for any index rec_per_key[i] != 0
if rec_per_key[0] != 0.
*/
- DBUG_ASSERT(pkinfo->rec_per_key[i]);
- DBUG_ASSERT(i > 0);
- rec_per_key*= pkinfo->rec_per_key[i-1];
- rec_per_key/= pkinfo->rec_per_key[i];
+ DBUG_ASSERT(pkinfo->actual_rec_per_key(i));
+ rec_per_key*= pkinfo->actual_rec_per_key(i-1);
+ rec_per_key/= pkinfo->actual_rec_per_key(i);
}
}
}
@@ -23079,7 +23314,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
select_limit= (ha_rows) (select_limit *
(double) table_records /
table->quick_condition_rows);
- rec_per_key= keyinfo->rec_per_key[keyinfo->key_parts-1];
+ rec_per_key= keyinfo->actual_rec_per_key(keyinfo->key_parts-1);
set_if_bigger(rec_per_key, 1);
/*
Here we take into account the fact that rows are
@@ -23220,7 +23455,7 @@ uint get_index_for_order(ORDER *order, TABLE *table, SQL_SELECT *select,
Update quick_condition_rows since single table UPDATE/DELETE procedures
don't call make_join_statistics() and leave this variable uninitialized.
*/
- table->quick_condition_rows= table->file->stats.records;
+ table->quick_condition_rows= table->stat_records();
int key, direction;
if (test_if_cheaper_ordering(NULL, order, table,
diff --git a/sql/sql_select.h b/sql/sql_select.h
index bac29b96c5a..638de926d75 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -101,6 +101,13 @@ typedef struct st_table_ref
uchar *key_buff; ///< value to look for with key
uchar *key_buff2; ///< key_buff+key_length
store_key **key_copy; //
+
+ /*
+ Bitmap of key parts which refer to constants. key_copy only has copiers for
+ non-const key parts.
+ */
+ key_part_map const_ref_part_map;
+
Item **items; ///< val()'s for each keypart
/*
Array of pointers to trigger variables. Some/all of the pointers may be
@@ -917,7 +924,7 @@ public:
*/
JOIN_TAB *table_access_tabs;
uint top_table_access_tabs_count;
-
+
JOIN_TAB **map2table; ///< mapping between table indexes and JOIN_TABs
JOIN_TAB *join_tab_save; ///< saved join_tab for subquery reexecution
@@ -1185,8 +1192,14 @@ public:
const char *zero_result_cause; ///< not 0 if exec must return zero result
bool union_part; ///< this subselect is part of union
+
+ enum join_optimization_state { NOT_OPTIMIZED=0,
+ OPTIMIZATION_IN_PROGRESS=1,
+ OPTIMIZATION_DONE=2};
bool optimized; ///< flag to avoid double optimization in EXPLAIN
bool initialized; ///< flag to avoid double init_execution calls
+
+ enum { QEP_NOT_PRESENT_YET, QEP_AVAILABLE, QEP_DELETED} have_query_plan;
/*
Additional WHERE and HAVING predicates to be considered for IN=>EXISTS
@@ -1269,6 +1282,7 @@ public:
ref_pointer_array_size= 0;
zero_result_cause= 0;
optimized= 0;
+ have_query_plan= QEP_NOT_PRESENT_YET;
initialized= 0;
cleaned= 0;
cond_equal= 0;
@@ -1300,9 +1314,11 @@ public:
SELECT_LEX_UNIT *unit);
bool prepare_stage2();
int optimize();
+ int optimize_inner();
int reinit();
int init_execution();
void exec();
+ void exec_inner();
int destroy();
void restore_tmp();
bool alloc_func_list();
@@ -1416,6 +1432,11 @@ public:
{
return (unit->item && unit->item->is_in_predicate());
}
+
+ int print_explain(select_result_sink *result, uint8 explain_flags,
+ bool on_the_fly,
+ bool need_tmp_table, bool need_order,
+ bool distinct,const char *message);
private:
/**
TRUE if the query contains an aggregate function but has no GROUP
@@ -1767,6 +1788,9 @@ inline bool optimizer_flag(THD *thd, uint flag)
return (thd->variables.optimizer_switch & flag);
}
+int print_fake_select_lex_join(select_result_sink *result, bool on_the_fly,
+ SELECT_LEX *select_lex, uint8 select_options);
+
uint get_index_for_order(ORDER *order, TABLE *table, SQL_SELECT *select,
ha_rows limit, bool *need_sort, bool *reverse);
ORDER *simple_remove_const(ORDER *order, COND *where);
diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc
index dce679a883f..b5b7f9866c5 100644
--- a/sql/sql_servers.cc
+++ b/sql/sql_servers.cc
@@ -158,7 +158,7 @@ bool servers_init(bool dont_read_servers_table)
}
/* Initialize the mem root for data */
- init_sql_alloc(&mem, ACL_ALLOC_BLOCK_SIZE, 0);
+ init_sql_alloc(&mem, ACL_ALLOC_BLOCK_SIZE, 0, MYF(MY_THREAD_SPECIFIC));
if (dont_read_servers_table)
goto end;
@@ -178,7 +178,7 @@ bool servers_init(bool dont_read_servers_table)
return_val= servers_reload(thd);
delete thd;
/* Remember that we don't have a THD */
- my_pthread_setspecific_ptr(THR_THD, 0);
+ set_current_thd(0);
end:
DBUG_RETURN(return_val);
@@ -209,7 +209,7 @@ static bool servers_load(THD *thd, TABLE_LIST *tables)
my_hash_reset(&servers_cache);
free_root(&mem, MYF(0));
- init_sql_alloc(&mem, ACL_ALLOC_BLOCK_SIZE, 0);
+ init_sql_alloc(&mem, ACL_ALLOC_BLOCK_SIZE, 0, MYF(0));
if (init_read_record(&read_record_info,thd,table=tables[0].table,NULL,1,0,
FALSE))
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index cb96db0a46d..095ad409a94 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -45,6 +45,7 @@
#include "set_var.h"
#include "sql_trigger.h"
#include "sql_derived.h"
+#include "sql_statistics.h"
#include "sql_connect.h"
#include "authors.h"
#include "contributors.h"
@@ -457,7 +458,7 @@ bool
ignore_db_dirs_init()
{
return my_init_dynamic_array(&ignore_db_dirs_array, sizeof(LEX_STRING *),
- 0, 0);
+ 0, 0, MYF(0));
}
@@ -736,7 +737,8 @@ find_files(THD *thd, List<LEX_STRING> *files, const char *db,
bzero((char*) &table_list,sizeof(table_list));
- if (!(dirp = my_dir(path,MYF(dir ? MY_WANT_STAT : 0))))
+ if (!(dirp = my_dir(path,MYF((dir ? MY_WANT_STAT : 0) |
+ MY_THREAD_SPECIFIC))))
{
if (my_errno == ENOENT)
my_error(ER_BAD_DB_ERROR, MYF(ME_BELL+ME_WAITTANG), db);
@@ -1386,8 +1388,35 @@ static void append_directory(THD *thd, String *packet, const char *dir_type,
#define LIST_PROCESS_HOST_LEN 64
-static bool get_field_default_value(THD *thd, Field *timestamp_field,
- Field *field, String *def_value,
+
+/**
+ Print "ON UPDATE" clause of a field into a string.
+
+ @param timestamp_field Pointer to timestamp field of a table.
+ @param field The field to generate ON UPDATE clause for.
+ @bool lcase Whether to print in lower case.
+ @return false on success, true on error.
+*/
+static bool print_on_update_clause(Field *field, String *val, bool lcase)
+{
+ DBUG_ASSERT(val->charset()->mbminlen == 1);
+ val->length(0);
+ if (field->has_update_default_function())
+ {
+ if (lcase)
+ val->append(STRING_WITH_LEN("on update "));
+ else
+ val->append(STRING_WITH_LEN("ON UPDATE "));
+ val->append(STRING_WITH_LEN("CURRENT_TIMESTAMP"));
+ if (field->decimals() > 0)
+ val->append_parenthesized(field->decimals());
+ return true;
+ }
+ return false;
+}
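A self-contained restatement of print_on_update_clause() (std::string in place of String; the field accessors are modeled as plain parameters):

#include <string>

static bool sketch_on_update_clause(bool has_update_default, unsigned decimals,
                                    std::string *val, bool lcase)
{
  val->clear();
  if (!has_update_default)
    return false;
  *val+= lcase ? "on update " : "ON UPDATE ";
  *val+= "CURRENT_TIMESTAMP";
  if (decimals > 0)                       // fractional-second precision
    *val+= "(" + std::to_string(decimals) + ")";
  return true;
}
// sketch_on_update_clause(true, 6, &s, false) -> s == "ON UPDATE CURRENT_TIMESTAMP(6)"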
+
+
+static bool get_field_default_value(THD *thd, Field *field, String *def_value,
bool quoted)
{
bool has_default;
@@ -1398,8 +1427,7 @@ static bool get_field_default_value(THD *thd, Field *timestamp_field,
We are using CURRENT_TIMESTAMP instead of NOW because it is
more standard
*/
- has_now_default= (timestamp_field == field &&
- field->unireg_check != Field::TIMESTAMP_UN_FIELD);
+ has_now_default= field->has_insert_default_function();
has_default= (field_type != FIELD_TYPE_BLOB &&
!(field->flags & NO_DEFAULT_VALUE_FLAG) &&
@@ -1411,7 +1439,11 @@ static bool get_field_default_value(THD *thd, Field *timestamp_field,
if (has_default)
{
if (has_now_default)
+ {
def_value->append(STRING_WITH_LEN("CURRENT_TIMESTAMP"));
+ if (field->decimals() > 0)
+ def_value->append_parenthesized(field->decimals());
+ }
else if (!field->is_null())
{ // Not null by default
char tmp[MAX_FIELD_WIDTH];
@@ -1643,16 +1675,18 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
}
if (!field->vcol_info &&
- get_field_default_value(thd, table->timestamp_field,
- field, &def_value, 1))
+ get_field_default_value(thd, field, &def_value, 1))
{
packet->append(STRING_WITH_LEN(" DEFAULT "));
packet->append(def_value.ptr(), def_value.length(), system_charset_info);
}
- if (!limited_mysql_mode && table->timestamp_field == field &&
- field->unireg_check != Field::TIMESTAMP_DN_FIELD)
- packet->append(STRING_WITH_LEN(" ON UPDATE CURRENT_TIMESTAMP"));
+ if (!limited_mysql_mode && print_on_update_clause(field, &def_value, false))
+ {
+ packet->append(STRING_WITH_LEN(" "));
+ packet->append(def_value);
+ }
+
if (field->unireg_check == Field::NEXT_NUMBER &&
!(thd->variables.sql_mode & MODE_NO_FIELD_OPTIONS))
@@ -2116,10 +2150,6 @@ public:
double progress;
};
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-template class I_List<thread_info>;
-#endif
-
static const char *thread_state_info(THD *tmp)
{
#ifndef EMBEDDED_LIBRARY
@@ -2281,6 +2311,179 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
DBUG_VOID_RETURN;
}
+
+/*
+ Produce EXPLAIN data.
+
+ This function is APC-scheduled to be run in the context of the thread that
+ we're producing EXPLAIN for.
+*/
+
+void Show_explain_request::call_in_target_thread()
+{
+ Query_arena backup_arena;
+ bool printed_anything= FALSE;
+
+ /*
+ Change the arena because JOIN::print_explain and co. are going to allocate
+ items. Let them allocate them on our arena.
+ */
+ target_thd->set_n_backup_active_arena((Query_arena*)request_thd,
+ &backup_arena);
+
+ query_str.copy(target_thd->query(),
+ target_thd->query_length(),
+ target_thd->query_charset());
+
+ DBUG_ASSERT(current_thd == target_thd);
+ set_current_thd(request_thd);
+ if (target_thd->lex->unit.print_explain(explain_buf, 0 /* explain flags*/,
+ &printed_anything))
+ {
+ failed_to_produce= TRUE;
+ }
+ set_current_thd(target_thd);
+
+ if (!printed_anything)
+ failed_to_produce= TRUE;
+
+ target_thd->restore_active_arena((Query_arena*)request_thd, &backup_arena);
+}
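call_in_target_thread() runs in the target thread but must build its output in memory the requesting thread owns, since the requester will read and later free it. A simplified model of that handoff (MemRoot and ThreadCtx are stand-ins for Query_arena/THD):

#include <string>
#include <vector>

struct MemRoot { std::vector<std::string> rows; };

struct ThreadCtx
{
  MemRoot *active;                   // where allocations currently land
  MemRoot own;
};

static void serve_explain(ThreadCtx &target, ThreadCtx &requester)
{
  MemRoot *backup= target.active;    // set_n_backup_active_arena()
  target.active= requester.active;   // allocate on the requester's behalf
  target.active->rows.push_back("1  SIMPLE  t1  ALL ...");  // an EXPLAIN row
  target.active= backup;             // restore_active_arena()
}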
+
+
+int select_result_explain_buffer::send_data(List<Item> &items)
+{
+ int res;
+ THD *cur_thd= current_thd;
+ DBUG_ENTER("select_result_explain_buffer::send_data");
+
+ /*
+    Switch to the receiving thread, so that we correctly count memory used
+ by it. This is needed as it's the receiving thread that will free the
+ memory.
+ */
+ set_current_thd(thd);
+ fill_record(thd, dst_table, dst_table->field, items, TRUE, FALSE);
+ res= dst_table->file->ha_write_tmp_row(dst_table->record[0]);
+ set_current_thd(cur_thd);
+ DBUG_RETURN(test(res));
+}
+
+
+/*
+ Store the SHOW EXPLAIN output in the temporary table.
+*/
+
+int fill_show_explain(THD *thd, TABLE_LIST *table, COND *cond)
+{
+ const char *calling_user;
+ THD *tmp;
+ my_thread_id thread_id;
+ DBUG_ENTER("fill_show_explain");
+
+ DBUG_ASSERT(cond==NULL);
+ thread_id= thd->lex->value_list.head()->val_int();
+ calling_user= (thd->security_ctx->master_access & PROCESS_ACL) ? NullS :
+ thd->security_ctx->priv_user;
+
+ if ((tmp= find_thread_by_id(thread_id)))
+ {
+ Security_context *tmp_sctx= tmp->security_ctx;
+ /*
+      If calling_user==NULL, the calling thread has the SUPER or PROCESS
+      privilege, and so can do SHOW EXPLAIN on any user.
+
+      If calling_user!=NULL, the caller is only allowed to view SHOW
+      EXPLAIN output for their own threads.
+ */
+ if (calling_user && (!tmp_sctx->user || strcmp(calling_user,
+ tmp_sctx->user)))
+ {
+ my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "PROCESS");
+ mysql_mutex_unlock(&tmp->LOCK_thd_data);
+ DBUG_RETURN(1);
+ }
+
+ if (tmp == thd)
+ {
+ mysql_mutex_unlock(&tmp->LOCK_thd_data);
+ my_error(ER_TARGET_NOT_EXPLAINABLE, MYF(0));
+ DBUG_RETURN(1);
+ }
+
+ bool bres;
+ /*
+      Ok, we've found the thread of interest and it won't go away because
+      we're holding its LOCK_thd_data. Post it a SHOW EXPLAIN request.
+ */
+ bool timed_out;
+ int timeout_sec= 30;
+ Show_explain_request explain_req;
+ select_result_explain_buffer *explain_buf;
+
+ explain_buf= new select_result_explain_buffer(thd, table->table);
+
+ explain_req.explain_buf= explain_buf;
+ explain_req.target_thd= tmp;
+ explain_req.request_thd= thd;
+ explain_req.failed_to_produce= FALSE;
+
+ /* Ok, we have a lock on target->LOCK_thd_data, can call: */
+ bres= tmp->apc_target.make_apc_call(thd, &explain_req, timeout_sec, &timed_out);
+
+ if (bres || explain_req.failed_to_produce)
+ {
+ if (thd->killed)
+ thd->send_kill_message();
+ else if (timed_out)
+ my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0));
+ else
+ my_error(ER_TARGET_NOT_EXPLAINABLE, MYF(0));
+
+ bres= TRUE;
+ }
+ else
+ {
+ /*
+ Push the query string as a warning. The query may be in a different
+ charset than the charset that's used for error messages, so, convert it
+ if needed.
+ */
+ CHARSET_INFO *fromcs= explain_req.query_str.charset();
+ CHARSET_INFO *tocs= error_message_charset_info;
+ char *warning_text;
+ if (!my_charset_same(fromcs, tocs))
+ {
+ uint conv_length= 1 + tocs->mbmaxlen * explain_req.query_str.length() /
+ fromcs->mbminlen;
+ uint dummy_errors;
+ char *to, *p;
+ if (!(to= (char*)thd->alloc(conv_length + 1)))
+ DBUG_RETURN(1);
+ p= to;
+ p+= copy_and_convert(to, conv_length, tocs,
+ explain_req.query_str.c_ptr(),
+ explain_req.query_str.length(), fromcs,
+ &dummy_errors);
+ *p= 0;
+ warning_text= to;
+ }
+ else
+ warning_text= explain_req.query_str.c_ptr_safe();
+
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ ER_YES, warning_text);
+ }
+ DBUG_RETURN(bres);
+ }
+ else
+ {
+ my_error(ER_NO_SUCH_THREAD, MYF(0), thread_id);
+ DBUG_RETURN(1);
+ }
+}
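The conversion above sizes its buffer as 1 + to.mbmaxlen * length / from.mbminlen: the worst case, where the source is all minimal-width characters and each becomes a maximal-width character, plus one byte for the terminating NUL. As a checked helper (sketch):

#include <cstddef>

static size_t conv_buf_size(size_t src_bytes, unsigned from_mbminlen,
                            unsigned to_mbmaxlen)
{
  /* src can hold at most src_bytes / from_mbminlen characters; each may
     need to_mbmaxlen bytes after conversion, plus 1 byte for the NUL. */
  return 1 + (size_t) to_mbmaxlen * src_bytes / from_mbminlen;
}
/* e.g. latin1 (mbminlen 1) -> utf8 (mbmaxlen 3): conv_buf_size(10, 1, 3) == 31 */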
+
+
int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond)
{
TABLE *table= tables->table;
@@ -2389,6 +2592,18 @@ int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond)
}
mysql_mutex_unlock(&tmp->LOCK_thd_data);
+ /*
+      This may become negative if we free memory allocated by another
+      thread in this thread. However, it's better to eventually notice
+      this than to hide it.
+ */
+ table->field[12]->store((longlong) (tmp->status_var.memory_used +
+ sizeof(THD)),
+ FALSE);
+ table->field[12]->set_notnull();
+ table->field[13]->store((longlong) tmp->examined_row_count, TRUE);
+ table->field[13]->set_notnull();
+
if (schema_table_store_record(thd, table))
{
mysql_mutex_unlock(&LOCK_thread_count);
@@ -2461,7 +2676,7 @@ int add_status_vars(SHOW_VAR *list)
if (status_vars_inited)
mysql_mutex_lock(&LOCK_status);
if (!all_status_vars.buffer && // array is not allocated yet - do it now
- my_init_dynamic_array(&all_status_vars, sizeof(SHOW_VAR), 200, 20))
+ my_init_dynamic_array(&all_status_vars, sizeof(SHOW_VAR), 200, 20, MYF(0)))
{
res= 1;
goto err;
@@ -2611,6 +2826,7 @@ static bool show_status_array(THD *thd, const char *wild,
for (; variables->name; variables++)
{
+    bool wild_checked= false;
strnmov(prefix_end, variables->name, len);
name_buffer[sizeof(name_buffer)-1]=0; /* Safety */
if (ucase_names)
@@ -2619,11 +2835,25 @@ static bool show_status_array(THD *thd, const char *wild,
restore_record(table, s->default_values);
table->field[0]->store(name_buffer, strlen(name_buffer),
system_charset_info);
+
/*
- if var->type is SHOW_FUNC, call the function.
- Repeat as necessary, if new var is again SHOW_FUNC
+      Compare the name for types that can't return arrays. We do this to
+      avoid calculating the value of function variables that we will not
+      access.
*/
- for (var=variables; var->type == SHOW_FUNC; var= &tmp)
+ if ((variables->type != SHOW_FUNC && variables->type != SHOW_ARRAY))
+ {
+ if (wild && wild[0] && wild_case_compare(system_charset_info,
+ name_buffer, wild))
+ continue;
+ wild_checked= 1; // Avoid checking it again
+ }
+
+ /*
+      If var->type is SHOW_FUNC or SHOW_SIMPLE_FUNC, call the function.
+      Repeat as necessary if the new var is again one of the above.
+ */
+ for (var=variables; var->type == SHOW_FUNC ||
+ var->type == SHOW_SIMPLE_FUNC; var= &tmp)
((mysql_show_var_func)(var->value))(thd, &tmp, buff);
SHOW_TYPE show_type=var->type;
@@ -2634,8 +2864,9 @@ static bool show_status_array(THD *thd, const char *wild,
}
else
{
- if (!(wild && wild[0] && wild_case_compare(system_charset_info,
- name_buffer, wild)) &&
+ if ((wild_checked ||
+ (wild && wild[0] && wild_case_compare(system_charset_info,
+ name_buffer, wild))) &&
(!cond || cond->val_int()))
{
char *value=var->value;
@@ -2792,7 +3023,8 @@ static int aggregate_user_stats(HASH *all_user_stats, HASH *agg_user_stats)
{
// First entry for this role.
if (!(agg_user= (USER_STATS*) my_malloc(sizeof(USER_STATS),
- MYF(MY_WME | MY_ZEROFILL))))
+ MYF(MY_WME | MY_ZEROFILL|
+ MY_THREAD_SPECIFIC))))
{
sql_print_error("Malloc in aggregate_user_stats failed");
DBUG_RETURN(1);
@@ -4165,7 +4397,7 @@ static int fill_schema_table_from_frm(THD *thd, TABLE_LIST *tables,
if (schema_table->i_s_requested_object & OPEN_TRIGGER_ONLY)
{
- init_sql_alloc(&tbl.mem_root, TABLE_ALLOC_BLOCK_SIZE, 0);
+ init_sql_alloc(&tbl.mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(0));
if (!Table_triggers_list::check_n_load(thd, db_name->str,
table_name->str, &tbl, 1))
{
@@ -5035,7 +5267,7 @@ static int get_schema_column_record(THD *thd, TABLE_LIST *tables,
const char *wild= lex->wild ? lex->wild->ptr() : NullS;
CHARSET_INFO *cs= system_charset_info;
TABLE *show_table;
- Field **ptr, *field, *timestamp_field;
+ Field **ptr, *field;
int count;
DBUG_ENTER("get_schema_column_record");
@@ -5059,7 +5291,6 @@ static int get_schema_column_record(THD *thd, TABLE_LIST *tables,
show_table= tables->table;
count= 0;
ptr= show_table->field;
- timestamp_field= show_table->timestamp_field;
show_table->use_all_columns(); // Required for default
restore_record(show_table, s->default_values);
@@ -5107,7 +5338,7 @@ static int get_schema_column_record(THD *thd, TABLE_LIST *tables,
cs);
table->field[4]->store((longlong) count, TRUE);
- if (get_field_default_value(thd, timestamp_field, field, &type, 0))
+ if (get_field_default_value(thd, field, &type, 0))
{
table->field[5]->store(type.ptr(), type.length(), cs);
table->field[5]->set_notnull();
@@ -5124,10 +5355,8 @@ static int get_schema_column_record(THD *thd, TABLE_LIST *tables,
if (field->unireg_check == Field::NEXT_NUMBER)
table->field[17]->store(STRING_WITH_LEN("auto_increment"), cs);
- if (timestamp_field == field &&
- field->unireg_check != Field::TIMESTAMP_DN_FIELD)
- table->field[17]->store(STRING_WITH_LEN("on update CURRENT_TIMESTAMP"),
- cs);
+ if (print_on_update_clause(field, &type, true))
+ table->field[17]->store(type.ptr(), type.length(), cs);
if (field->vcol_info)
{
if (field->stored_in_db)
@@ -5734,9 +5963,12 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
TABLE *show_table= tables->table;
KEY *key_info=show_table->s->key_info;
if (show_table->file)
+ {
show_table->file->info(HA_STATUS_VARIABLE |
HA_STATUS_NO_LOCK |
HA_STATUS_TIME);
+ set_statistics_for_table(thd, show_table);
+ }
for (uint i=0 ; i < show_table->s->keys ; i++,key_info++)
{
KEY_PART_INFO *key_part= key_info->key_part;
@@ -5767,8 +5999,8 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
KEY *key=show_table->key_info+i;
if (key->rec_per_key[j])
{
- ha_rows records=(show_table->file->stats.records /
- key->rec_per_key[j]);
+ ha_rows records=((double) show_table->stat_records() /
+ key->actual_rec_per_key(j));
table->field[9]->store((longlong) records, TRUE);
table->field[9]->set_notnull();
}
@@ -8421,6 +8653,8 @@ ST_FIELD_INFO processlist_fields_info[]=
{"MAX_STAGE", 2, MYSQL_TYPE_TINY, 0, 0, "Max_stage", SKIP_OPEN_TABLE},
{"PROGRESS", 703, MYSQL_TYPE_DECIMAL, 0, 0, "Progress",
SKIP_OPEN_TABLE},
+ {"MEMORY_USED", 7, MYSQL_TYPE_LONG, 0, 0, "Memory_used", SKIP_OPEN_TABLE},
+ {"EXAMINED_ROWS", 7, MYSQL_TYPE_LONG, 0, 0, "Examined_rows", SKIP_OPEN_TABLE},
{0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
};
@@ -8610,6 +8844,32 @@ ST_FIELD_INFO keycache_fields_info[]=
};
+ST_FIELD_INFO show_explain_fields_info[]=
+{
+ /* field_name, length, type, value, field_flags, old_name*/
+ {"id", 3, MYSQL_TYPE_LONGLONG, 0 /*value*/, MY_I_S_MAYBE_NULL, "id",
+ SKIP_OPEN_TABLE},
+ {"select_type", 19, MYSQL_TYPE_STRING, 0 /*value*/, 0, "select_type",
+ SKIP_OPEN_TABLE},
+ {"table", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0 /*value*/, MY_I_S_MAYBE_NULL,
+ "table", SKIP_OPEN_TABLE},
+ {"type", 15, MYSQL_TYPE_STRING, 0, MY_I_S_MAYBE_NULL, "type", SKIP_OPEN_TABLE},
+ {"possible_keys", NAME_CHAR_LEN*MAX_KEY, MYSQL_TYPE_STRING, 0/*value*/,
+ MY_I_S_MAYBE_NULL, "possible_keys", SKIP_OPEN_TABLE},
+ {"key", NAME_CHAR_LEN*MAX_KEY, MYSQL_TYPE_STRING, 0/*value*/,
+ MY_I_S_MAYBE_NULL, "key", SKIP_OPEN_TABLE},
+ {"key_len", NAME_CHAR_LEN*MAX_KEY, MYSQL_TYPE_STRING, 0/*value*/,
+ MY_I_S_MAYBE_NULL, "key_len", SKIP_OPEN_TABLE},
+ {"ref", NAME_CHAR_LEN*MAX_REF_PARTS, MYSQL_TYPE_STRING, 0/*value*/,
+ MY_I_S_MAYBE_NULL, "ref", SKIP_OPEN_TABLE},
+ {"rows", 10, MYSQL_TYPE_LONGLONG, 0/*value*/, MY_I_S_MAYBE_NULL, "rows",
+ SKIP_OPEN_TABLE},
+ {"Extra", 255, MYSQL_TYPE_STRING, 0/*value*/, 0 /*flags*/, "Extra",
+ SKIP_OPEN_TABLE},
+ {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
+};
+
+
/*
Description of ST_FIELD_INFO in table.h
@@ -8641,6 +8901,8 @@ ST_SCHEMA_TABLE schema_tables[]=
{"EVENTS", events_fields_info, create_schema_table,
0, make_old_format, 0, -1, -1, 0, 0},
#endif
+ {"EXPLAIN", show_explain_fields_info, create_schema_table, fill_show_explain,
+ make_old_format, 0, -1, -1, TRUE /*hidden*/ , 0},
{"FILES", files_fields_info, create_schema_table,
hton_fill_schema_table, 0, 0, -1, -1, 0, 0},
{"GLOBAL_STATUS", variables_fields_info, create_schema_table,
@@ -8716,18 +8978,13 @@ ST_SCHEMA_TABLE schema_tables[]=
};
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-template class List_iterator_fast<char>;
-template class List<char>;
-#endif
-
int initialize_schema_table(st_plugin_int *plugin)
{
ST_SCHEMA_TABLE *schema_table;
DBUG_ENTER("initialize_schema_table");
if (!(schema_table= (ST_SCHEMA_TABLE *)my_malloc(sizeof(ST_SCHEMA_TABLE),
- MYF(MY_WME | MY_ZEROFILL))))
+ MYF(MY_WME | MY_ZEROFILL))))
DBUG_RETURN(1);
/* Historical Requirement */
plugin->data= schema_table; // shortcut for the future
diff --git a/sql/sql_show.h b/sql/sql_show.h
index 6e87f6097f0..03d8af3aabd 100644
--- a/sql/sql_show.h
+++ b/sql/sql_show.h
@@ -19,6 +19,7 @@
#include "sql_list.h" /* List */
#include "handler.h" /* enum_schema_tables */
#include "table.h" /* enum_schema_table_state */
+#include "my_apc.h"
/* Forward declarations */
class JOIN;
@@ -131,6 +132,31 @@ enum enum_schema_tables get_schema_table_idx(ST_SCHEMA_TABLE *schema_table);
/* These functions were under INNODB_COMPATIBILITY_HOOKS */
int get_quote_char_for_identifier(THD *thd, const char *name, uint length);
+THD *find_thread_by_id(ulong id);
+
+class select_result_explain_buffer;
+/*
+ SHOW EXPLAIN request object.
+*/
+
+class Show_explain_request : public Apc_target::Apc_call
+{
+public:
+ THD *target_thd; /* thd that we're running SHOW EXPLAIN for */
+ THD *request_thd; /* thd that ran the SHOW EXPLAIN command */
+
+ /* If true, there was some error when producing EXPLAIN output. */
+ bool failed_to_produce;
+
+ /* The SHOW EXPLAIN output will be stored here */
+ select_result_explain_buffer *explain_buf;
+
+ /* Query that we've got SHOW EXPLAIN for */
+ String query_str;
+
+ /* Overloaded virtual function */
+ void call_in_target_thread();
+};
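+
+/*
+ A sketch of the intended flow (the exact call sites live in the .cc
+ files; only members declared above are referenced): the requesting
+ thread fills in target_thd, request_thd and query_str, then posts this
+ object to the target thread through its Apc_target. The target thread
+ eventually runs call_in_target_thread(), which writes the EXPLAIN
+ output into explain_buf or, on error, sets failed_to_produce.
+*/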
/* Handle the ignored database directories list for SHOW/I_S. */
bool ignore_db_dirs_init();
diff --git a/sql/sql_sort.h b/sql/sql_sort.h
index f1a3a2f9d8b..d30ddfb6eec 100644
--- a/sql/sql_sort.h
+++ b/sql/sql_sort.h
@@ -16,6 +16,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+#include "m_string.h" /* memset */
#include "my_global.h" /* uchar */
#include "my_base.h" /* ha_rows */
#include "my_sys.h" /* qsort2_cmp */
@@ -27,7 +28,6 @@ typedef struct st_sort_field SORT_FIELD;
class Field;
struct TABLE;
-
/* Defines used by filesort and uniques */
#define MERGEBUFF 7
@@ -65,41 +65,51 @@ struct BUFFPEK_COMPARE_CONTEXT
void *key_compare_arg;
};
-typedef struct st_sort_param {
- uint rec_length; /* Length of sorted records */
- uint sort_length; /* Length of sorted columns */
- uint ref_length; /* Length of record ref. */
- uint addon_length; /* Length of added packed fields */
- uint res_length; /* Length of records in final sorted file/buffer */
- uint keys; /* Max keys / buffer */
+
+class Sort_param {
+public:
+ uint rec_length; // Length of sorted records.
+ uint sort_length; // Length of sorted columns.
+ uint ref_length; // Length of record ref.
+ uint addon_length; // Length of added packed fields.
+ uint res_length; // Length of records in final sorted file/buffer.
+ uint max_keys_per_buffer; // Max keys / buffer.
uint min_dupl_count;
- ha_rows max_rows,examined_rows;
- TABLE *sort_form; /* For quicker make_sortkey */
+ ha_rows max_rows; // Select limit, or HA_POS_ERROR if unlimited.
+ ha_rows examined_rows; // Number of examined rows.
+ TABLE *sort_form; // For quicker make_sortkey.
SORT_FIELD *local_sortorder;
SORT_FIELD *end;
- SORT_ADDON_FIELD *addon_field; /* Descriptors for companion fields */
+ SORT_ADDON_FIELD *addon_field; // Descriptors for companion fields.
uchar *unique_buff;
bool not_killable;
char* tmp_buffer;
- /* The fields below are used only by Unique class */
+ // The fields below are used only by Unique class.
qsort2_cmp compare;
BUFFPEK_COMPARE_CONTEXT cmp_context;
-} SORTPARAM;
+ Sort_param()
+ {
+ memset(this, 0, sizeof(*this));
+ }
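+ // Note: the memset() zero-fill above is only safe while Sort_param
+ // stays free of virtual functions and non-trivial members.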
+ void init_for_filesort(uint sortlen, TABLE *table,
+ ulong max_length_for_sort_data,
+ ha_rows maxrows, bool sort_positions);
+};
-int merge_many_buff(SORTPARAM *param, uchar *sort_buffer,
+
+int merge_many_buff(Sort_param *param, uchar *sort_buffer,
BUFFPEK *buffpek,
uint *maxbuffer, IO_CACHE *t_file);
uint read_to_buffer(IO_CACHE *fromfile,BUFFPEK *buffpek,
uint sort_length);
-int merge_buffers(SORTPARAM *param,IO_CACHE *from_file,
- IO_CACHE *to_file, uchar *sort_buffer,
- BUFFPEK *lastbuff,BUFFPEK *Fb,
- BUFFPEK *Tb,int flag);
-int merge_index(SORTPARAM *param, uchar *sort_buffer,
+int merge_buffers(Sort_param *param,IO_CACHE *from_file,
+ IO_CACHE *to_file, uchar *sort_buffer,
+ BUFFPEK *lastbuff,BUFFPEK *Fb,
+ BUFFPEK *Tb,int flag);
+int merge_index(Sort_param *param, uchar *sort_buffer,
BUFFPEK *buffpek, uint maxbuffer,
IO_CACHE *tempfile, IO_CACHE *outfile);
-
void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length);
#endif /* SQL_SORT_INCLUDED */
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
new file mode 100644
index 00000000000..e34b4b21819
--- /dev/null
+++ b/sql/sql_statistics.cc
@@ -0,0 +1,3056 @@
+/* Copyright (C) 2009 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/**
+ @file
+
+ @brief
+ Functions to update persistent statistical tables and to read from them
+
+ @defgroup Query_Optimizer Query Optimizer
+ @{
+*/
+
+#include "sql_base.h"
+#include "key.h"
+#include "sql_statistics.h"
+#include "my_atomic.h"
+
+/*
+ The system variable 'use_stat_tables' can take one of the
+ following values:
+ "never", "complementary", "preferably".
+ If the value of the variable 'use_stat_tables' is set to
+ "never" then any statistical data from the persistent statistical tables
+ is ignored by the optimizer.
+ If the value of the variable 'use_stat_tables' is set to
+ "complementary" then a particular statistical characteristic is used
+ by the optimizer only if the database engine does not provide a similar
+ statistic. For example, 'nulls_ratio' for table columns is currently
+ not provided by any engine, so the optimizer uses this statistical data
+ from the statistical tables. At the same time it does not use
+ 'avg_frequency' for any index prefix from the statistical tables since
+ a similar statistical characteristic, 'records_per_key', can be
+ requested from the database engine.
+ If the value of the variable 'use_stat_tables' is set to
+ "preferably" the optimizer takes a particular statistical characteristic
+ from the persistent statistical tables whenever it is available there,
+ and falls back on the engine statistics only when it is not.
+ If an ANALYZE command is executed then it collects statistical data
+ for the tables specified by the command and stores the collected
+ statistics in the persistent statistical tables, provided that the
+ value of the variable 'use_stat_tables' is not equal to "never".
+*/
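+
+/*
+ A usage illustration (not part of this patch):
+
+   SET use_stat_tables='preferably';
+   ANALYZE TABLE t1;  -- collects statistics for t1 and stores it in
+                      -- the persistent statistical tables
+*/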
+
+/* Currently there are only 3 persistent statistical tables */
+static const uint STATISTICS_TABLES= 3;
+
+/*
+ The names of the statistical tables in this array must correspond to the
+ definitions of the tables in the file ../scripts/mysql_system_tables.sql
+*/
+static const LEX_STRING stat_table_name[STATISTICS_TABLES]=
+{
+ { C_STRING_WITH_LEN("table_stats") },
+ { C_STRING_WITH_LEN("column_stats") },
+ { C_STRING_WITH_LEN("index_stats") }
+};
+
+/* Name of database to which the statistical tables belong */
+static const LEX_STRING stat_tables_db_name= { C_STRING_WITH_LEN("mysql") };
+
+
+/**
+ @details
+ The function builds a list of TABLE_LIST elements for system statistical
+ tables using array of TABLE_LIST passed as a parameter.
+ The lock type of each element is set to TL_READ if for_write = FALSE,
+ otherwise it is set to TL_WRITE.
+*/
+
+static
+inline void init_table_list_for_stat_tables(TABLE_LIST *tables, bool for_write)
+{
+ uint i;
+
+ memset((char *) &tables[0], 0, sizeof(TABLE_LIST) * STATISTICS_TABLES);
+
+ for (i= 0; i < STATISTICS_TABLES; i++)
+ {
+ tables[i].db= stat_tables_db_name.str;
+ tables[i].db_length= stat_tables_db_name.length;
+ tables[i].alias= tables[i].table_name= stat_table_name[i].str;
+ tables[i].table_name_length= stat_table_name[i].length;
+ tables[i].lock_type= for_write ? TL_WRITE : TL_READ;
+ if (i < STATISTICS_TABLES - 1)
+ tables[i].next_global= tables[i].next_local=
+ tables[i].next_name_resolution_table= &tables[i+1];
+ if (i != 0)
+ tables[i].prev_global= &tables[i-1].next_global;
+ }
+}
+
+
+/**
+ @details
+ The function builds a TABLE_LIST containing only one element 'tbl' for
+ the statistical table called 'stat_tab_name'.
+ The lock type of the element is set to TL_READ if for_write = FALSE,
+ otherwise it is set to TL_WRITE.
+*/
+
+static
+inline void init_table_list_for_single_stat_table(TABLE_LIST *tbl,
+ const LEX_STRING *stat_tab_name,
+ bool for_write)
+{
+ memset((char *) tbl, 0, sizeof(TABLE_LIST));
+
+ tbl->db= stat_tables_db_name.str;
+ tbl->db_length= stat_tables_db_name.length;
+ tbl->alias= tbl->table_name= stat_tab_name->str;
+ tbl->table_name_length= stat_tab_name->length;
+ tbl->lock_type= for_write ? TL_WRITE : TL_READ;
+}
+
+
+/**
+ @brief
+ Open all statistical tables and lock them
+*/
+
+static
+inline int open_stat_tables(THD *thd, TABLE_LIST *tables,
+ Open_tables_backup *backup,
+ bool for_write)
+{
+ init_table_list_for_stat_tables(tables, for_write);
+ init_mdl_requests(tables);
+ return open_system_tables_for_read(thd, tables, backup);
+}
+
+
+/**
+ @brief
+ Open a statistical table and lock it
+*/
+static
+inline int open_single_stat_table(THD *thd, TABLE_LIST *table,
+ const LEX_STRING *stat_tab_name,
+ Open_tables_backup *backup,
+ bool for_write)
+{
+ init_table_list_for_single_stat_table(table, stat_tab_name, for_write);
+ init_mdl_requests(table);
+ return open_system_tables_for_read(thd, table, backup);
+}
+
+
+/*
+ The class Column_statistics_collected is a helper class used to collect
+ statistics on a table column. The class is derived directly from
+ the class Column_statistics and, in addition to the fields of the
+ latter, it contains fields to accumulate the results of aggregation
+ for the number of nulls in the column and for the size of the column
+ values. There is also a container for distinct column values used
+ to calculate the average number of records per distinct column value.
+*/
+
+class Column_statistics_collected :public Column_statistics
+{
+
+private:
+ Field *column; /* The column to collect statistics on */
+ ha_rows nulls; /* To accumulate the number of nulls in the column */
+ ulonglong column_total_length; /* To accumulate the size of column values */
+ Count_distinct_field *count_distinct; /* The container for distinct
+ column values */
+
+ bool is_single_pk_col; /* TRUE <-> the only column of the primary key */
+
+public:
+
+ inline void init(THD *thd, Field * table_field);
+ inline void add(ha_rows rowno);
+ inline void finish(ha_rows rows);
+ inline void cleanup();
+};
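+
+/*
+ Expected call order for the methods above (a sketch inferred from the
+ method names; the implementations appear further down in this file):
+   init()    - once per column before the table scan
+   add()     - once per scanned row
+   finish()  - once after the scan, to derive the aggregated values
+   cleanup() - to release the helper structures
+*/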
+
+
+/**
+ Stat_table is the base class for classes Table_stat, Column_stat and
+ Index_stat. The methods of these classes allow us to read statistical
+ data from statistical tables, write collected statistical data into
+ statistical tables and update statistical data in these tables
+ as well as update access fields belonging to the primary key and
+ delete records by prefixes of the primary key.
+ Objects of the classes Table_stat, Column_stat and Index_stat are used
+ for reading/writing statistics from/into the persistent tables table_stats,
+ column_stats and index_stats respectively. These tables are stored in
+ the system database 'mysql'.
+
+ Statistics is always read and written for a given database table t. When
+ an object of any of these classes is created a pointer to the TABLE
+ structure for this database table is passed as a parameter to the constructor
+ of the object. The other parameter is a pointer to the TABLE structure for
+ the corresponding statistical table st. So construction of an object to
+ read/write statistical data on table t from/into statistical table st
+ requires both table t and st to be opened.
+ In some cases the TABLE structure for table t may be undefined. Then
+ the objects of the classes Table_stat, Column_stat and Index stat are
+ created by the alternative constructor that require only the name
+ of the table t and the name of the database it belongs to. Currently the
+ alternative constructors are used only in the cases when some records
+ belonging to the table are to be deleted, or its keys are to be updated
+
+ Reading/writing statistical data from/into a statistical table is always
+ performed by a key. At the moment there is only one key defined for each
+ statistical table and this key is primary.
+ The primary key for the table table_stats is built as (db_name, table_name).
+ The primary key for the table column_stats is built as (db_name, table_name,
+ column_name).
+ The primary key for the table index_stats is built as (db_name, table_name,
+ index_name, prefix_arity).
+
+ Reading statistical data from a statistical table is performed by the
+ following pattern. First a table-dependent method sets the values of
+ the fields that comprise the lookup key. Then an implementation of the
+ method get_stat_values() declared in Stat_table as a pure virtual method
+ finds the row from the statistical table by the set key. If the row is
+ found the values of statistical fields are read from this row and are
+ distributed in the internal structures.
+
+ Let's assume the statistical data is read for table t from database db.
+
+ When statistical data is searched in the table table_stats first
+ Table_stat::set_key_fields() should set the fields of db_name and
+ table_name. Then get_stat_values looks for a row by the set key value,
+ and, if the row is found, reads the value from the column
+ table_stats.cardinality into the field read_stat.cardinality of the TABLE
+ structure for table t and sets the value of read_stat.cardinality_is_null
+ from this structure to FALSE. If the value of the 'cardinality' column
+ in the row is null or if no row is found read_stat.cardinality_is_null
+ is set to TRUE.
+
+ When statistical data is searched in the table column_stats first
+ Column_stat::set_key_fields() should set the fields of db_name, table_name
+ and column_name with column_name taken out of the only parameter f of the
+ Field* type passed to this method. After this get_stat_values looks
+ for a row by the set key value. If the row is found the values of statistical
+ data columns min_value, max_value, nulls_ratio, avg_length, avg_frequency
+ are read into internal structures. Values of nulls_ratio, avg_length,
+ avg_frequency are read into the corresponding fields of the read_stat
+ structure from the Field object f, while values from min_value and max_value
+ are copied into the min_value and max_value record buffers attached to the
+ TABLE structure for table t.
+ If the value of a statistical column in the found row is null, then the
+ corresponding flag in the f->read_stat.column_stat_nulls bitmap is set off.
+ Otherwise the flag is set on. If no row is found for the column, all flags
+ in f->column_stat_nulls are set off.
+
+ When statistical data is searched in the table index_stats first
+ Index_stat::set_key_fields() has to be called to set the fields of db_name,
+ table_name, index_name and prefix_arity. The value of index_name is extracted
+ from the first parameter key_info of the KEY* type passed to the method.
+ This parameter specifies the index of interest idx. The second parameter
+ passed to the method specifies the arity k of the index prefix for which
+ statistical data is to be read. E.g. if the index idx consists of 3
+ components (p1,p2,p3) the table index_stats usually will contain 3 rows for
+ this index: the first - for the prefix (p1), the second - for the prefix
+ (p1,p2), and the third - for the prefix (p1,p2,p3). After the key fields
+ have been set, a call of get_stat_values looks for a row by the set key value.
+ If the row is found and the value of the avg_frequency column is not null
+ then this value is assigned to key_info->read_stat.avg_frequency[k].
+ Otherwise 0 is assigned to this element.
+
+ The method Stat_table::update_stat is used to write statistical data
+ collected in the internal structures into a statistical table st.
+ It is assumed that before any invocation of this method a call of the
+ function st.set_key_fields has set the values of the primary key fields
+ that serve to locate the row in the statistical table st where the
+ collected statistical data from internal structures are to be written
+ to. The statistical data is written from the counterparts of the
+ statistical fields of internal structures into which it would be read
+ by the functions get_stat_values. The counterpart fields are used
+ only when statistics is collected.
+ When updating/inserting a row from the statistical table st the method
+ Stat_table::update_stat calls the implementation of the pure virtual
+ method store_field_values to transfer statistical data from the fields
+ of internal structures to the fields of record buffer used for updates
+ of the statistical table st.
+*/
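+
+/*
+ A minimal reading sketch for the pattern described above (for
+ illustration only; the real call sites appear later in this file):
+
+   Table_stat table_stat(stat_table, table); // both TABLEs already opened
+   table_stat.set_key_fields();              // store (db_name, table_name)
+   table_stat.get_stat_values();             // fill read_stat.cardinality
+*/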
+
+class Stat_table
+{
+
+private:
+
+ /* Handler used for the retrieval of the statistical table stat_table */
+ handler *stat_file;
+
+ uint stat_key_length; /* Length of the key to access stat_table */
+ uchar *record[2]; /* Record buffers used to access/update stat_table */
+ uint stat_key_idx; /* The number of the key to access stat_table */
+
+ /* This is a helper function used only by the Stat_table constructors */
+ void common_init_stat_table()
+ {
+ stat_file= stat_table->file;
+ /* Currently any statistical table has only one key */
+ stat_key_idx= 0;
+ stat_key_info= &stat_table->key_info[stat_key_idx];
+ stat_key_length= stat_key_info->key_length;
+ record[0]= stat_table->record[0];
+ record[1]= stat_table->record[1];
+ }
+
+protected:
+
+ /* Statistical table to read statistics from or to update/delete */
+ TABLE *stat_table;
+ KEY *stat_key_info; /* Structure for the index to access stat_table */
+
+ /* Table for which statistical data is read / updated */
+ TABLE *table;
+ TABLE_SHARE *table_share; /* Table share for 'table' */
+ LEX_STRING *db_name; /* Name of the database containing 'table' */
+ LEX_STRING *table_name; /* Name of the table 'table' */
+
+ void store_record_for_update()
+ {
+ store_record(stat_table, record[1]);
+ }
+
+ void store_record_for_lookup()
+ {
+ store_record(stat_table, record[0]);
+ }
+
+ bool update_record()
+ {
+ int err;
+ if ((err= stat_file->ha_update_row(record[1], record[0])) &&
+ err != HA_ERR_RECORD_IS_THE_SAME)
+ return TRUE;
+ return FALSE;
+ }
+
+public:
+
+
+ /**
+ @details
+ This constructor has to be called by any constructor of the derived
+ classes. The constructor 'tunes' the private and protected members of
+ the constructed object to the statistical table 'stat_table' with the
+ statistical data of our interest and to the table 'tab' for which this
+ statistics has been collected.
+ */
+
+ Stat_table(TABLE *stat, TABLE *tab)
+ :stat_table(stat), table(tab)
+ {
+ table_share= tab->s;
+ common_init_stat_table();
+ db_name= &table_share->db;
+ table_name= &table_share->table_name;
+ }
+
+
+ /**
+ @details
+ This constructor has to be called by any constructor of the derived
+ classes. The constructor 'tunes' the private and protected members of
+ the constructed object to the statistical table 'stat_table' with the
+ statistical data of our interest and to the table t for which this
+ statistics has been collected. The table t is uniquely specified
+ by the database name 'db' and the table name 'tab'.
+ */
+
+ Stat_table(TABLE *stat, LEX_STRING *db, LEX_STRING *tab)
+ :stat_table(stat), table_share(NULL)
+ {
+ common_init_stat_table();
+ db_name= db;
+ table_name= tab;
+ }
+
+
+ virtual ~Stat_table() {}
+
+ /**
+ @brief
+ Store the given values of fields for database name and table name
+
+ @details
+ This is a pure virtual method.
+ The implementation for any derived class shall store the given
+ values of the database name and table name in the corresponding
+ fields of stat_table.
+
+ @note
+ The method is called by the update_table_name_key_parts function.
+ */
+
+ virtual void change_full_table_name(LEX_STRING *db, LEX_STRING *tab)= 0;
+
+
+ /**
+ @brief
+ Store statistical data into fields of the statistical table
+
+ @details
+ This is a pure virtual method.
+ The implementation for any derived class shall put the appropriate
+ statistical data into the corresponding fields of stat_table.
+
+ @note
+ The method is called by the update_stat function.
+ */
+
+ virtual void store_stat_fields()= 0;
+
+
+ /**
+ @brief
+ Read statistical data from fields of the statistical table
+
+ @details
+ This is a pure virtual method.
+ The implementation for any derived class shall read the appropriate
+ statistical data from the corresponding fields of stat_table.
+ */
+
+ virtual void get_stat_values()= 0;
+
+
+ /**
+ @brief
+ Find a record in the statistical table by a primary key
+
+ @details
+ The function looks for a record in stat_table by its primary key.
+ It assumes that the key fields have already been stored in the record
+ buffer of stat_table.
+
+ @retval
+ FALSE the record is not found
+ @retval
+ TRUE the record is found
+ */
+
+ bool find_stat()
+ {
+ uchar key[MAX_KEY_LENGTH];
+ key_copy(key, record[0], stat_key_info, stat_key_length);
+ return !stat_file->ha_index_read_idx_map(record[0], stat_key_idx, key,
+ HA_WHOLE_KEY, HA_READ_KEY_EXACT);
+ }
+
+
+ /**
+ @brief
+ Find a record in the statistical table by a key prefix value
+
+ @details
+ The function looks for a record in stat_table by the key value consisting
+ of 'prefix_parts' major components for the primary index.
+ It assumes that the key prefix fields have already been stored in the record
+ buffer of stat_table.
+
+ @retval
+ FALSE the record is not found
+ @retval
+ TRUE the record is found
+ */
+
+ bool find_next_stat_for_prefix(uint prefix_parts)
+ {
+ uchar key[MAX_KEY_LENGTH];
+ uint prefix_key_length= 0;
+ for (uint i= 0; i < prefix_parts; i++)
+ prefix_key_length+= stat_key_info->key_part[i].store_length;
+ key_copy(key, record[0], stat_key_info, prefix_key_length);
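+ /* Keypart bitmap of the first prefix_parts components, e.g. 2 -> 0b11 */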
+ key_part_map prefix_map= (key_part_map) ((1 << prefix_parts) - 1);
+ return !stat_file->ha_index_read_idx_map(record[0], stat_key_idx, key,
+ prefix_map, HA_READ_KEY_EXACT);
+ }
+
+
+ /**
+ @brief
+ Update/insert a record in the statistical table with new statistics
+
+ @details
+ The function first looks for a record by its primary key in the statistical
+ table stat_table. If the record is found the function updates the
+ statistical fields of the record. The data for these fields are taken
+ from internal structures containing info on the table 'table'. If the
+ record is not found the function inserts a new record with the primary
+ key set to the search key and the statistical data taken from the
+ internal structures.
+ The function assumes that the key fields have already been stored in
+ the record buffer of stat_table.
+
+ @retval
+ FALSE success with the update/insert of the record
+ @retval
+ TRUE failure with the update/insert of the record
+
+ @note
+ The function calls the virtual method store_stat_fields to populate the
+ statistical fields of the updated/inserted row with new statistics.
+ */
+
+ bool update_stat()
+ {
+ if (find_stat())
+ {
+ store_record_for_update();
+ store_stat_fields();
+ return update_record();
+ }
+ else
+ {
+ int err;
+ store_stat_fields();
+ if ((err= stat_file->ha_write_row(record[0])))
+ return TRUE;
+ }
+ return FALSE;
+ }
+
+
+ /**
+ @brief
+ Update the table name fields in the current record of stat_table
+
+ @details
+ The function updates the fields containing database name and table name
+ for the last found record in the statistical table stat_table.
+ The new names are taken from the parameters
+ db and tab.
+
+ @retval
+ FALSE success with the update of the record
+ @retval
+ TRUE failure with the update of the record
+
+ @note
+ The function calls the virtual method change_full_table_name
+ to store the new names in the record buffer used for updates.
+ */
+
+ bool update_table_name_key_parts(LEX_STRING *db, LEX_STRING *tab)
+ {
+ store_record_for_update();
+ change_full_table_name(db, tab);
+ bool rc= update_record();
+ store_record_for_lookup();
+ return rc;
+ }
+
+
+ /**
+ @brief
+ Delete the current record of the statistical table stat_table
+
+ @details
+ The function deletes the last found record from the statistical
+ table stat_table.
+
+ @retval
+ FALSE success with the deletion of the record
+ @retval
+ TRUE failure with the deletion of the record
+ */
+
+ bool delete_stat()
+ {
+ int err;
+ if ((err= stat_file->ha_delete_row(record[0])))
+ return TRUE;
+ return FALSE;
+ }
+};
+
+
+/*
+ An object of the class Table_stat is created to read statistical
+ data on tables from the statistical table table_stats, to update
+ table_stats with such statistical data, or to update columns
+ of the primary key, or to delete the record by its primary key or
+ its prefix.
+ Rows from the statistical table are always read and updated by the
+ primary key.
+*/
+
+class Table_stat: public Stat_table
+{
+
+private:
+
+ Field *db_name_field; /* Field for the column table_stats.db_name */
+ Field *table_name_field; /* Field for the column table_stats.table_name */
+
+ void common_init_table_stat()
+ {
+ db_name_field= stat_table->field[TABLE_STAT_DB_NAME];
+ table_name_field= stat_table->field[TABLE_STAT_TABLE_NAME];
+ }
+
+ void change_full_table_name(LEX_STRING *db, LEX_STRING *tab)
+ {
+ db_name_field->store(db->str, db->length, system_charset_info);
+ table_name_field->store(tab->str, tab->length, system_charset_info);
+ }
+
+public:
+
+ /**
+ @details
+ The constructor 'tunes' the private and protected members of the
+ constructed object for the statistical table table_stats to read/update
+ statistics on table 'tab'. The TABLE structure for the table table_stats
+ must be passed as a value for the parameter 'stat'.
+ */
+
+ Table_stat(TABLE *stat, TABLE *tab) :Stat_table(stat, tab)
+ {
+ common_init_table_stat();
+ }
+
+
+ /**
+ @details
+ The constructor 'tunes' the private and protected members of the
+ object constructed for the statistical table table_stats for
+ the future updates/deletes of the record concerning the table 'tab'
+ from the database 'db'.
+ */
+
+ Table_stat(TABLE *stat, LEX_STRING *db, LEX_STRING *tab)
+ :Stat_table(stat, db, tab)
+ {
+ common_init_table_stat();
+ }
+
+
+ /**
+ @brief
+ Set the key fields for the statistical table table_stats
+
+ @details
+ The function sets the values of the fields db_name and table_name
+ in the record buffer for the statistical table table_stats.
+ These fields comprise the primary key for the table.
+
+ @note
+ The function is supposed to be called before any use of the
+ method find_stat for an object of the Table_stat class.
+ */
+
+ void set_key_fields()
+ {
+ db_name_field->store(db_name->str, db_name->length, system_charset_info);
+ table_name_field->store(table_name->str, table_name->length,
+ system_charset_info);
+ }
+
+
+ /**
+ @brief
+ Store statistical data into statistical fields of table_stat
+
+ @details
+ This implementation of a pure virtual method sets the value of the
+ column 'cardinality' of the statistical table table_stats according to
+ the value of the flag write_stat.cardinality_is_null and the value of
+ the field write_stat.cardinality from the TABLE structure for 'table'.
+ */
+
+ void store_stat_fields()
+ {
+ Field *stat_field= stat_table->field[TABLE_STAT_CARDINALITY];
+ if (table->collected_stats->cardinality_is_null)
+ stat_field->set_null();
+ else
+ {
+ stat_field->set_notnull();
+ stat_field->store(table->collected_stats->cardinality);
+ }
+ }
+
+
+ /**
+ @brief
+ Read statistical data from statistical fields of table_stat
+
+ @details
+ This implementation of a pure virtual method first looks for a record in
+ the statistical table table_stats by the primary key set in the record
+ buffer with the help of Table_stat::set_key_fields. Then, if the row is
+ found, the function reads the value of the column 'cardinality' of the table
+ table_stats and sets the value of the flag read_stat.cardinality_is_null
+ and the value of the field read_stat.cardinality from the TABLE structure
+ for 'table' accordingly.
+ */
+
+ void get_stat_values()
+ {
+ Table_statistics *read_stats= table_share->stats_cb.table_stats;
+ read_stats->cardinality_is_null= TRUE;
+ read_stats->cardinality= 0;
+ if (find_stat())
+ {
+ Field *stat_field= stat_table->field[TABLE_STAT_CARDINALITY];
+ if (!stat_field->is_null())
+ {
+ read_stats->cardinality_is_null= FALSE;
+ read_stats->cardinality= stat_field->val_int();
+ }
+ }
+ }
+
+};
+
+
+/*
+ An object of the class Column_stat is created to read statistical data
+ on table columns from the statistical table column_stats, to update
+ column_stats with such statistical data, or to update columns
+ of the primary key, or to delete the record by its primary key or
+ its prefix.
+ Rows from the statistical table are always read and updated by the
+ primary key.
+*/
+
+class Column_stat: public Stat_table
+{
+
+private:
+
+ Field *db_name_field; /* Field for the column column_stats.db_name */
+ Field *table_name_field; /* Field for the column column_stats.table_name */
+ Field *column_name_field; /* Field for the column column_stats.column_name */
+
+ Field *table_field; /* Field from 'table' to read/update statistics on */
+
+ void common_init_column_stat_table()
+ {
+ db_name_field= stat_table->field[COLUMN_STAT_DB_NAME];
+ table_name_field= stat_table->field[COLUMN_STAT_TABLE_NAME];
+ column_name_field= stat_table->field[COLUMN_STAT_COLUMN_NAME];
+ }
+
+ void change_full_table_name(LEX_STRING *db, LEX_STRING *tab)
+ {
+ db_name_field->store(db->str, db->length, system_charset_info);
+ table_name_field->store(tab->str, tab->length, system_charset_info);
+ }
+
+public:
+
+ /**
+ @details
+ The constructor 'tunes' the private and protected members of the
+ constructed object for the statistical table column_stats to read/update
+ statistics on fields of the table 'tab'. The TABLE structure for the table
+ column_stats must be passed as a value for the parameter 'stat'.
+ */
+
+ Column_stat(TABLE *stat, TABLE *tab) :Stat_table(stat, tab)
+ {
+ common_init_column_stat_table();
+ }
+
+
+ /**
+ @details
+ The constructor 'tunes' the private and protected members of the
+ object constructed for the statistical table column_stats for
+ the future updates/deletes of the record concerning the table 'tab'
+ from the database 'db'.
+ */
+
+ Column_stat(TABLE *stat, LEX_STRING *db, LEX_STRING *tab)
+ :Stat_table(stat, db, tab)
+ {
+ common_init_column_stat_table();
+ }
+
+ /**
+ @brief
+ Set table name fields for the statistical table column_stats
+
+ @details
+ The function stores the values of the fields db_name and table_name
+ of the statistical table column_stats in the record buffer.
+ */
+
+ void set_full_table_name()
+ {
+ db_name_field->store(db_name->str, db_name->length, system_charset_info);
+ table_name_field->store(table_name->str, table_name->length,
+ system_charset_info);
+ }
+
+
+ /**
+ @brief
+ Set the key fields for the statistical table column_stats
+
+ @param
+ col Field for the 'table' column to read/update statistics on
+
+ @details
+ The function stores the values of the fields db_name, table_name and
+ column_name in the record buffer for the statistical table column_stats.
+ These fields comprise the primary key for the table.
+ It also sets table_field to the passed parameter.
+
+ @note
+ The function is supposed to be called before any use of the
+ method find_stat for an object of the Column_stat class.
+ */
+
+ void set_key_fields(Field *col)
+ {
+ set_full_table_name();
+ const char *column_name= col->field_name;
+ column_name_field->store(column_name, strlen(column_name),
+ system_charset_info);
+ table_field= col;
+ }
+
+
+ /**
+ @brief
+ Update the table name fields in the current record of stat_table
+
+ @details
+ The function updates the primary key fields containing database name,
+ table name, and column name for the last found record in the statistical
+ table column_stats.
+
+ @retval
+ FALSE success with the update of the record
+ @retval
+ TRUE failure with the update of the record
+ */
+
+ bool update_column_key_part(const char *col)
+ {
+ store_record_for_update();
+ set_full_table_name();
+ column_name_field->store(col, strlen(col), system_charset_info);
+ bool rc= update_record();
+ store_record_for_lookup();
+ return rc;
+ }
+
+
+ /**
+ @brief
+ Store statistical data into statistical fields of column_stats
+
+ @details
+ This implementation of a pure virtual method sets the value of the
+ columns 'min_value', 'max_value', 'nulls_ratio', 'avg_length' and
+ 'avg_frequency' of the statistical table column_stats according to the
+ contents of the bitmap write_stat.column_stat_nulls and the values
+ of the fields min_value, max_value, nulls_ratio, avg_length and
+ avg_frequency of the structure write_stat from the Field structure
+ for the field 'table_field'.
+ The value of the k-th column in the table column_stats is set to NULL
+ if the k-th bit in the bitmap 'column_stat_nulls' is set to 1.
+
+ @note
+ A value from the field min_value/max_value is always converted
+ into a utf8 string. If the length of the column 'min_value'/'max_value'
+ is less than the length of the string, the string is trimmed to fit the
+ length of the column.
+ */
+
+ void store_stat_fields()
+ {
+ char buff[MAX_FIELD_WIDTH];
+ String val(buff, sizeof(buff), &my_charset_utf8_bin);
+
+ for (uint i= COLUMN_STAT_MIN_VALUE; i <= COLUMN_STAT_AVG_FREQUENCY; i++)
+ {
+ Field *stat_field= stat_table->field[i];
+ if (table_field->collected_stats->is_null(i))
+ stat_field->set_null();
+ else
+ {
+ stat_field->set_notnull();
+ switch (i) {
+ case COLUMN_STAT_MIN_VALUE:
+ if (table_field->type() == MYSQL_TYPE_BIT)
+ stat_field->store(table_field->collected_stats->min_value->val_int());
+ else
+ {
+ table_field->collected_stats->min_value->val_str(&val);
+ stat_field->store(val.ptr(), val.length(), &my_charset_utf8_bin);
+ }
+ break;
+ case COLUMN_STAT_MAX_VALUE:
+ if (table_field->type() == MYSQL_TYPE_BIT)
+ stat_field->store(table_field->collected_stats->max_value->val_int());
+ else
+ {
+ table_field->collected_stats->max_value->val_str(&val);
+ stat_field->store(val.ptr(), val.length(), &my_charset_utf8_bin);
+ }
+ break;
+ case COLUMN_STAT_NULLS_RATIO:
+ stat_field->store(table_field->collected_stats->get_nulls_ratio());
+ break;
+ case COLUMN_STAT_AVG_LENGTH:
+ stat_field->store(table_field->collected_stats->get_avg_length());
+ break;
+ case COLUMN_STAT_AVG_FREQUENCY:
+ stat_field->store(table_field->collected_stats->get_avg_frequency());
+ break;
+ }
+ }
+ }
+ }
+
+
+ /**
+ @brief
+ Read statistical data from statistical fields of column_stats
+
+ @details
+ This implementation of a pure virtual method first looks for a record in
+ the statistical table column_stats by the primary key set in the record
+ buffer with the help of Column_stat::set_key_fields. Then, if the row is
+ found, the function reads the values of the columns 'min_value',
+ 'max_value', 'nulls_ratio', 'avg_length' and 'avg_frequency' of the
+ table column_stats and sets accordingly the value of the bitmap
+ read_stat.column_stat_nulls and the values of the fields min_value,
+ max_value, nulls_ratio, avg_length and avg_frequency of the structure
+ read_stat from the Field structure for the field 'table_field'.
+ */
+
+ void get_stat_values()
+ {
+ table_field->read_stats->set_all_nulls();
+
+ if (table_field->read_stats->min_value)
+ table_field->read_stats->min_value->set_null();
+ if (table_field->read_stats->max_value)
+ table_field->read_stats->max_value->set_null();
+
+ if (find_stat())
+ {
+ char buff[MAX_FIELD_WIDTH];
+ String val(buff, sizeof(buff), &my_charset_utf8_bin);
+
+ for (uint i= COLUMN_STAT_MIN_VALUE; i <= COLUMN_STAT_AVG_FREQUENCY; i++)
+ {
+ Field *stat_field= stat_table->field[i];
+
+ if (!stat_field->is_null() &&
+ (i > COLUMN_STAT_MAX_VALUE ||
+ (i == COLUMN_STAT_MIN_VALUE &&
+ table_field->read_stats->min_value) ||
+ (i == COLUMN_STAT_MAX_VALUE &&
+ table_field->read_stats->max_value)))
+ {
+ table_field->read_stats->set_not_null(i);
+
+ switch (i) {
+ case COLUMN_STAT_MIN_VALUE:
+ stat_field->val_str(&val);
+ table_field->read_stats->min_value->store(val.ptr(), val.length(),
+ &my_charset_utf8_bin);
+ break;
+ case COLUMN_STAT_MAX_VALUE:
+ stat_field->val_str(&val);
+ table_field->read_stats->max_value->store(val.ptr(), val.length(),
+ &my_charset_utf8_bin);
+ break;
+ case COLUMN_STAT_NULLS_RATIO:
+ table_field->read_stats->set_nulls_ratio(stat_field->val_real());
+ break;
+ case COLUMN_STAT_AVG_LENGTH:
+ table_field->read_stats->set_avg_length(stat_field->val_real());
+ break;
+ case COLUMN_STAT_AVG_FREQUENCY:
+ table_field->read_stats->set_avg_frequency(stat_field->val_real());
+ break;
+ }
+ }
+ }
+ }
+ }
+
+};
+
+
+/*
+ An object of the class Index_stat is created to read statistical
+ data on index prefixes from the statistical table index_stats, to update
+ index_stats with such statistical data, or to update columns
+ of the primary key, or to delete the record by its primary key or
+ its prefix.
+ Rows from the statistical table are always read and updated by the
+ primary key.
+*/
+
+class Index_stat: public Stat_table
+{
+
+private:
+
+ Field *db_name_field; /* Field for the column index_stats.db_name */
+ Field *table_name_field; /* Field for the column index_stats.table_name */
+ Field *index_name_field; /* Field for the column index_stats.index_name */
+ Field *prefix_arity_field; /* Field for the column index_stats.prefix_arity */
+
+ KEY *table_key_info; /* Info on the index to read/update statistics on */
+ uint prefix_arity; /* Number of components of the index prefix of interest */
+
+ void common_init_index_stat_table()
+ {
+ db_name_field= stat_table->field[INDEX_STAT_DB_NAME];
+ table_name_field= stat_table->field[INDEX_STAT_TABLE_NAME];
+ index_name_field= stat_table->field[INDEX_STAT_INDEX_NAME];
+ prefix_arity_field= stat_table->field[INDEX_STAT_PREFIX_ARITY];
+ }
+
+ void change_full_table_name(LEX_STRING *db, LEX_STRING *tab)
+ {
+ db_name_field->store(db->str, db->length, system_charset_info);
+ table_name_field->store(tab->str, tab->length, system_charset_info);
+ }
+
+public:
+
+
+ /**
+ @details
+ The constructor 'tunes' the private and protected members of the
+ constructed object for the statistical table index_stats to read/update
+ statistics on prefixes of different indexes of the table 'tab'.
+ The TABLE structure for the table index_stats must be passed as a value
+ for the parameter 'stat'.
+ */
+
+ Index_stat(TABLE *stat, TABLE *tab) :Stat_table(stat, tab)
+ {
+ common_init_index_stat_table();
+ }
+
+
+ /**
+ @details
+ The constructor 'tunes' the private and protected members of the
+ object constructed for the statistical table index_stats for
+ the future updates/deletes of the record concerning the table 'tab'
+ from the database 'db'.
+ */
+
+ Index_stat(TABLE *stat, LEX_STRING *db, LEX_STRING *tab)
+ :Stat_table(stat, db, tab)
+ {
+ common_init_index_stat_table();
+ }
+
+
+ /**
+ @brief
+ Set table name fields for the statistical table index_stats
+
+ @details
+ The function stores the values of the fields db_name and table_name
+ of the statistical table index_stats in the record buffer.
+ */
+
+ void set_full_table_name()
+ {
+ db_name_field->store(db_name->str, db_name->length, system_charset_info);
+ table_name_field->store(table_name->str, table_name->length,
+ system_charset_info);
+ }
+
+ /**
+ @brief
+ Set the key fields of index_stats used to access records for index prefixes
+
+ @param
+ index_info Info for the index of 'table' to read/update statistics on
+
+ @details
+ The function sets the values of the fields db_name, table_name and
+ index_name in the record buffer for the statistical table index_stats.
+ It also sets table_key_info to the passed parameter.
+
+ @note
+ The function is supposed to be called before any use of the method
+ find_next_stat_for_prefix for an object of the Index_stat class.
+ */
+
+ void set_index_prefix_key_fields(KEY *index_info)
+ {
+ set_full_table_name();
+ char *index_name= index_info->name;
+ index_name_field->store(index_name, strlen(index_name),
+ system_charset_info);
+ table_key_info= index_info;
+ }
+
+
+ /**
+ @brief
+ Set the key fields for the statistical table index_stats
+
+ @param
+ index_info Info for the index of 'table' to read/update statistics on
+ @param
+ index_prefix_arity Number of components in the index prefix of interest
+
+ @details
+ The function sets the values of the fields db_name, table_name and
+ index_name, prefix_arity in the record buffer for the statistical
+ table index_stats. These fields comprise the primary key for the table.
+
+ @note
+ The function is supposed to be called before any use of the
+ method find_stat for an object of the Index_stat class.
+ */
+
+ void set_key_fields(KEY *index_info, uint index_prefix_arity)
+ {
+ set_index_prefix_key_fields(index_info);
+ prefix_arity= index_prefix_arity;
+ prefix_arity_field->store(index_prefix_arity, TRUE);
+ }
+
+
+ /**
+ @brief
+ Store statistical data into statistical fields of table index_stats
+
+ @details
+ This implementation of a pure virtual method sets the value of the
+ column 'avg_frequency' of the statistical table index_stats according to
+ the value of write_stat.avg_frequency[Index_stat::prefix_arity]
+ from the KEY_INFO structure 'table_key_info'.
+ If the value of write_stat.avg_frequency[Index_stat::prefix_arity] is
+ equal to 0, the value of the column is set to NULL.
+ */
+
+ void store_stat_fields()
+ {
+ Field *stat_field= stat_table->field[INDEX_STAT_AVG_FREQUENCY];
+ double avg_frequency=
+ table_key_info->collected_stats->get_avg_frequency(prefix_arity-1);
+ if (avg_frequency == 0)
+ stat_field->set_null();
+ else
+ {
+ stat_field->set_notnull();
+ stat_field->store(avg_frequency);
+ }
+ }
+
+
+ /**
+ @brief
+ Read statistical data from statistical fields of index_stats
+
+ @details
+ This implementation of a pure virtual method first looks for a record in
+ the statistical table index_stats by the primary key set in the record
+ buffer with the help of Index_stat::set_key_fields. If the row is found
+ the function reads the value of the column 'avg_frequency' of the table
+ index_stats and
+ sets the value of read_stat.avg_frequency[Index_stat::prefix_arity]
+ from the KEY_INFO structure 'table_key_info' accordingly. If the value of
+ the column is NULL, read_stat.avg_frequency[Index_stat::prefix_arity] is
+ set to 0. Otherwise, read_stat.avg_frequency[Index_stat::prefix_arity] is
+ set to the value of the column.
+ */
+
+ void get_stat_values()
+ {
+ double avg_frequency= 0;
+ if (find_stat())
+ {
+ Field *stat_field= stat_table->field[INDEX_STAT_AVG_FREQUENCY];
+ if (!stat_field->is_null())
+ avg_frequency= stat_field->val_real();
+ }
+ table_key_info->read_stats->set_avg_frequency(prefix_arity-1, avg_frequency);
+ }
+
+};
+
+
+/*
+ The class Count_distinct_field is a helper class used to calculate
+ the number of distinct values for a column. The class employs the
+ Unique class for this purpose.
+ The class Count_distinct_field is used only by the function
+ collect_statistics_for_table to calculate the values for
+ column avg_frequency of the statistical table column_stats.
+*/
+
+class Count_distinct_field: public Sql_alloc
+{
+protected:
+
+ /* Field for which the number of distinct values is to be found */
+ Field *table_field;
+ Unique *tree; /* The helper object to contain distinct values */
+ uint tree_key_length; /* The length of the keys for the elements of 'tree' */
+
+public:
+
+ /**
+ @param
+ field Field for which the number of distinct values is
+ to be found
+ @param
+ max_heap_table_size The limit for the memory used by the RB tree container
+ of the constructed Unique object 'tree'
+
+ @details
+ The constructor sets the values of 'table_field' and 'tree_key_length',
+ and then calls the 'new' operation to create a Unique object for 'tree'.
+ The type of 'field' and the value of max_heap_table_size determine the set
+ of parameters to be passed to the constructor of the Unique object.
+ */
+
+ Count_distinct_field(Field *field, uint max_heap_table_size)
+ {
+ qsort_cmp2 compare_key;
+ void* cmp_arg;
+ enum enum_field_types f_type= field->type();
+
+ table_field= field;
+ tree_key_length= field->pack_length();
+
+ if ((f_type == MYSQL_TYPE_VARCHAR) ||
+ (!field->binary() && (f_type == MYSQL_TYPE_STRING ||
+ f_type == MYSQL_TYPE_VAR_STRING)))
+ {
+ compare_key= (qsort_cmp2) simple_str_key_cmp;
+ cmp_arg= (void*) field;
+ }
+ else
+ {
+ cmp_arg= (void*) &tree_key_length;
+ compare_key= (qsort_cmp2) simple_raw_key_cmp;
+ }
+
+ tree= new Unique(compare_key, cmp_arg,
+ tree_key_length, max_heap_table_size);
+ }
+
+ virtual ~Count_distinct_field()
+ {
+ delete tree;
+ tree= NULL;
+ }
+
+ /*
+ @brief
+ Check whether the Unique object tree has been successfully created
+ */
+ bool exists()
+ {
+ return (tree != NULL);
+ }
+
+ /*
+ @brief
+ Add the value of 'field' to the container of the Unique object 'tree'
+ */
+ virtual bool add()
+ {
+ return tree->unique_add(table_field->ptr);
+ }
+
+ /*
+ @brief
+ Calculate the number of elements accumulated in the container of 'tree'
+ */
+ ulonglong get_value()
+ {
+ ulonglong count;
+ if (tree->elements == 0)
+ return (ulonglong) tree->elements_in_tree();
+ count= 0;
+ tree->walk(table_field->table, count_distinct_walk, (void*) &count);
+ return count;
+ }
+};
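+
+/*
+ A usage sketch (for illustration; the real driver is the function
+ collect_statistics_for_table):
+
+   Count_distinct_field cdf(field, max_heap_table_size);
+   if (cdf.exists())          // the Unique tree was allocated successfully
+   {
+     // for every scanned row: cdf.add();
+     ulonglong distinct_values= cdf.get_value();
+   }
+*/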
+
+
+/*
+ The class Count_distinct_field_bit is derived from the class
+ Count_distinct_field to be used only for fields of the MYSQL_TYPE_BIT type.
+ The class provides a different implementation for the method add
+*/
+
+class Count_distinct_field_bit: public Count_distinct_field
+{
+public:
+ Count_distinct_field_bit(Field *field, uint max_heap_table_size)
+ :Count_distinct_field(field, max_heap_table_size) {}
+ bool add()
+ {
+ longlong val= table_field->val_int();
+ return tree->unique_add(&val);
+ }
+};
+
+
+/*
+ The class Index_prefix_calc is a helper class used to calculate the values
+ for the column 'avg_frequency' of the statistical table index_stats.
+ For any table t from the database db and any k-component prefix of the
+ index i for this table the row from index_stats with the primary key
+ (db,t,i,k) must contain in the column 'avg_frequency' either NULL or
+ the number that is the ratio of N and V, where N is the number of index
+ entries without NULL values in the first k components of the index i,
+ and V is the number of distinct tuples composed of the first k components
+ encountered among these index entries.
+ Currently the objects of this class are used only by the function
+ collect_statistics_for_index.
+*/
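+
+/*
+ A worked example: let the index i be (p1,p2) and let the scanned entries,
+ none containing nulls, be (1,1),(1,2),(1,2),(2,1). For k=1 there are
+ N=4 entries and V=2 distinct prefixes, so avg_frequency = 4/2 = 2.0.
+ For k=2, N=4 and V=3, so avg_frequency = 4/3.
+*/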
+
+class Index_prefix_calc: public Sql_alloc
+{
+
+private:
+
+ /* Table containing index specified by index_info */
+ TABLE *index_table;
+ /* Info for the index i for whose prefixes 'avg_frequency' is calculated */
+ KEY *index_info;
+ /* The maximum number of the components in the prefixes of interest */
+ uint prefixes;
+ bool empty;
+
+ /* This structure is created for every k components of the index i */
+ class Prefix_calc_state
+ {
+ public:
+ /*
+ The number of the scanned index entries without nulls
+ in the first k components
+ */
+ ulonglong entry_count;
+ /*
+ The number of the scanned index entries without nulls with
+ the last encountered k-component prefix
+ */
+ ulonglong prefix_count;
+ /* The values of the last encountered k-component prefix */
+ Cached_item *last_prefix;
+ };
+
+ /*
+ Array of structures used to calculate 'avg_frequency' for different
+ prefixes of the index i
+ */
+ Prefix_calc_state *calc_state;
+
+public:
+
+ bool is_single_comp_pk;
+
+ Index_prefix_calc(TABLE *table, KEY *key_info)
+ : index_table(table), index_info(key_info)
+ {
+ uint i;
+ Prefix_calc_state *state;
+ uint key_parts= table->actual_n_key_parts(key_info);
+ empty= TRUE;
+ prefixes= 0;
+
+ is_single_comp_pk= FALSE;
+ uint pk= table->s->primary_key;
+ if ((uint) (key_info - table->key_info) == pk &&
+ table->key_info[pk].key_parts == 1)
+ {
+ prefixes= 1;
+ is_single_comp_pk= TRUE;
+ return;
+ }
+
+ if ((calc_state=
+ (Prefix_calc_state *) sql_alloc(sizeof(Prefix_calc_state)*key_parts)))
+ {
+ uint keyno= key_info-table->key_info;
+ for (i= 0, state= calc_state; i < key_parts; i++, state++)
+ {
+ /*
+ Do not consider prefixes containing a component that is only part
+ of the field. This limitation is set to avoid fetching data when
+ calculating the values of 'avg_frequency' for prefixes.
+ */
+ if (!key_info->key_part[i].field->part_of_key.is_set(keyno))
+ break;
+
+ if (!(state->last_prefix=
+ new Cached_item_field(key_info->key_part[i].field)))
+ break;
+ state->entry_count= state->prefix_count= 0;
+ prefixes++;
+ }
+ }
+ }
+
+
+ /**
+ @brief
+ Change the elements of calc_state after reading the next index entry
+
+ @details
+ This function is to be called at the index scan each time the next
+ index entry has been read into the record buffer.
+ For each of the index prefixes the function checks whether nulls
+ are encountered in any of the k components of the prefix.
+ If this is not the case the value of calc_state[k-1].entry_count
+ is incremented by 1. Then the function checks whether the value of
+ any of these k components has changed. If so, the value of
+ calc_state[k-1].prefix_count is incremented by 1.
+ */
+
+ void add()
+ {
+ uint i;
+ Prefix_calc_state *state;
+ uint first_changed= prefixes;
+ for (i= prefixes, state= calc_state+prefixes-1; i; i--, state--)
+ {
+ if (state->last_prefix->cmp())
+ first_changed= i-1;
+ }
+ if (empty)
+ {
+ first_changed= 0;
+ empty= FALSE;
+ }
+ for (i= 0, state= calc_state; i < prefixes; i++, state++)
+ {
+ if (state->last_prefix->null_value)
+ break;
+ if (i >= first_changed)
+ state->prefix_count++;
+ state->entry_count++;
+ }
+ }
+
+ /**
+ @brief
+ Calculate the values of avg_frequency for all prefixes of an index
+
+ @details
+ This function is to be called after the index scan that counts the number
+ of distinct index prefixes has been done. The function calculates
+ the value of avg_frequency for the index prefix with k components
+ as calc_state[k-1].entry_count/calc_state[k-1].prefix_count.
+ If calc_state[k-1].prefix_count happens to be 0, the value of
+ avg_frequency[k-1] is set to 0, i.e. is considered as unknown.
+ */
+
+ void get_avg_frequency()
+ {
+ uint i;
+ Prefix_calc_state *state;
+
+ if (is_single_comp_pk)
+ {
+ index_info->collected_stats->set_avg_frequency(0, 1.0);
+ return;
+ }
+
+ for (i= 0, state= calc_state; i < prefixes; i++, state++)
+ {
+ if (i < prefixes)
+ {
+ double val= state->prefix_count == 0 ?
+ 0 : (double) state->entry_count / state->prefix_count;
+ index_info->collected_stats->set_avg_frequency(i, val);
+ }
+ }
+ }
+};
+
+
+/**
+ @brief
+ Create fields for min/max values to collect column statistics
+
+ @param
+ table Table the fields are created for
+
+ @details
+ The function first allocates record buffers to store min/max values
+ for 'table's fields. Then for each table field f it creates Field structures
+ that point to these buffers rather than to the record buffer as the
+ Field object for f does. The pointers of the created fields are placed
+ in the collected_stats structure of the Field object for f.
+ The function allocates the buffers for min/max values in the table
+ memory.
+
+ @note
+ The buffers allocated when min/max values are used to read statistics
+ from the persistent statistical tables differ from the buffers that
+ are used when statistics on min/max values for a column are collected,
+ as they are allocated in different mem_roots.
+ The same is true for the fields created for min/max values.
+*/
+
+static
+void create_min_max_statistical_fields_for_table(TABLE *table)
+{
+ uint rec_buff_length= table->s->rec_buff_length;
+
+ if ((table->collected_stats->min_max_record_buffers=
+ (uchar *) alloc_root(&table->mem_root, 2*rec_buff_length)))
+ {
+ uchar *record= table->collected_stats->min_max_record_buffers;
+ memset(record, 0, 2*rec_buff_length);
+
+ for (uint i=0; i < 2; i++, record+= rec_buff_length)
+ {
+ for (Field **field_ptr= table->field; *field_ptr; field_ptr++)
+ {
+ Field *fld;
+ Field *table_field= *field_ptr;
+ my_ptrdiff_t diff= record-table->record[0];
+ if (!bitmap_is_set(table->read_set, table_field->field_index))
+ continue;
+ if (!(fld= table_field->clone(&table->mem_root, table, diff, TRUE)))
+ continue;
+ if (i == 0)
+ table_field->collected_stats->min_value= fld;
+ else
+ table_field->collected_stats->max_value= fld;
+ }
+ }
+ }
+}
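A minimal standalone sketch of the pointer-arithmetic idea behind the clone(..., diff, ...) call above; one local array stands in for the mem_root allocations and all names are illustrative only:

    #include <cstdio>
    #include <cstring>

    int main()
    {
      char buf[32];
      char *record0= buf;       /* plays the role of table->record[0]     */
      char *min_rec= buf + 16;  /* plays the role of the min-value buffer */
      long diff= (long) (min_rec - record0);  /* what the code computes   */

      memcpy(record0, "abc", 4);
      memcpy(record0 + diff, record0, 4);  /* a clone shifted by diff
                                              writes into the min buffer  */
      printf("%s\n", min_rec);             /* prints: abc                 */
      return 0;
    }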
+
+
+/**
+ @brief
+ Create fields for min/max values to read column statistics
+
+ @param
+ thd Thread handler
+ @param
+ table_share Table share the fields are created for
+ @param
+ is_safe TRUE <-> at any time only one thread can perform the function
+
+ @details
+ The function first allocates record buffers to store min/max values
+ for 'table_share's fields. Then for each field f it creates Field structures
+ that point to these buffers rather than to the record buffer, as the
+ Field object for f does. The pointers to the created fields are placed
+ in the read_stats structure of the Field object for f.
+ The function allocates the buffers for min/max values in the table share
+ memory.
+ If the parameter is_safe is TRUE then it is guaranteed that at any given time
+ only one thread executes the code of the function.
+
+ @note
+ The buffers allocated when min/max values are used to collect statistics
+ differ from the buffers that are used when statistics on min/max values
+ for a column are read from the persistent statistical tables, as they
+ are allocated in different mem_roots.
+ The same is true for the fields created for min/max values.
+*/
+
+static
+void create_min_max_statistical_fields_for_table_share(THD *thd,
+ TABLE_SHARE *table_share)
+{
+ TABLE_STATISTICS_CB *stats_cb= &table_share->stats_cb;
+ Table_statistics *stats= stats_cb->table_stats;
+
+ if (stats->min_max_record_buffers)
+ return;
+
+ uint rec_buff_length= table_share->rec_buff_length;
+
+ if ((stats->min_max_record_buffers=
+ (uchar *) alloc_root(&stats_cb->mem_root, 2*rec_buff_length)))
+ {
+ uchar *record= stats->min_max_record_buffers;
+ memset(record, 0, 2*rec_buff_length);
+
+ for (uint i=0; i < 2; i++, record+= rec_buff_length)
+ {
+ for (Field **field_ptr= table_share->field; *field_ptr; field_ptr++)
+ {
+ Field *fld;
+ Field *table_field= *field_ptr;
+ my_ptrdiff_t diff= record - table_share->default_values;
+ if (!(fld= table_field->clone(&stats_cb->mem_root, diff)))
+ continue;
+ if (i == 0)
+ table_field->read_stats->min_value= fld;
+ else
+ table_field->read_stats->max_value= fld;
+ }
+ }
+ }
+
+}
+
+
+/**
+ @brief
+ Allocate memory for the table's statistical data to be collected
+
+ @param
+ table Table for which the memory for statistical data is allocated
+
+ @note
+ The function allocates the memory for the statistical data on 'table' with
+ the intention to collect the data there. The memory is allocated for
+ the statistics on the table, on the table's columns, and on the table's
+ indexes. The memory is allocated in the table's mem_root.
+
+ @retval
+ 0 If the memory for all statistical data has been successfully allocated
+ @retval
+ 1 Otherwise
+
+ @note
+ Each thread allocates its own memory to collect statistics on the table.
+ This allows us, for example, to collect statistics on different indexes
+ of the same table in parallel.
+*/
+
+int alloc_statistics_for_table(THD* thd, TABLE *table)
+{
+ Field **field_ptr;
+ uint fields;
+
+ DBUG_ENTER("alloc_statistics_for_table");
+
+
+ Table_statistics *table_stats=
+ (Table_statistics *) alloc_root(&table->mem_root,
+ sizeof(Table_statistics));
+
+ fields= table->s->fields;
+ Column_statistics_collected *column_stats=
+ (Column_statistics_collected *) alloc_root(&table->mem_root,
+ sizeof(Column_statistics_collected) *
+ (fields+1));
+
+ uint keys= table->s->keys;
+ Index_statistics *index_stats=
+ (Index_statistics *) alloc_root(&table->mem_root,
+ sizeof(Index_statistics) * keys);
+
+ uint key_parts= table->s->ext_key_parts;
+ ulong *idx_avg_frequency= (ulong*) alloc_root(&table->mem_root,
+ sizeof(ulong) * key_parts);
+
+ if (!table_stats || !column_stats || !index_stats || !idx_avg_frequency)
+ DBUG_RETURN(1);
+
+ table->collected_stats= table_stats;
+ table_stats->column_stats= column_stats;
+ table_stats->index_stats= index_stats;
+ table_stats->idx_avg_frequency= idx_avg_frequency;
+
+ memset(column_stats, 0, sizeof(Column_statistics) * (fields+1));
+
+ for (field_ptr= table->field; *field_ptr; field_ptr++, column_stats++)
+ {
+ (*field_ptr)->collected_stats= column_stats;
+ (*field_ptr)->collected_stats->max_value= NULL;
+ (*field_ptr)->collected_stats->min_value= NULL;
+ }
+
+ memset(idx_avg_frequency, 0, sizeof(ulong) * key_parts);
+
+ KEY *key_info, *end;
+ for (key_info= table->key_info, end= key_info + table->s->keys;
+ key_info < end;
+ key_info++, index_stats++)
+ {
+ key_info->collected_stats= index_stats;
+ key_info->collected_stats->init_avg_frequency(idx_avg_frequency);
+ idx_avg_frequency+= key_info->ext_key_parts;
+ }
+
+ create_min_max_statistical_fields_for_table(table);
+
+ DBUG_RETURN(0);
+}
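The way init_avg_frequency() carves the shared idx_avg_frequency array into per-index slices can be illustrated with a standalone sketch (hypothetical key counts, not part of the patch):

    #include <cstdio>

    struct Index_stats_sketch { unsigned long *avg_frequency; };

    int main()
    {
      /* two indexes with 2 and 3 extended key parts respectively */
      unsigned long pool[5]= {0};
      unsigned ext_key_parts[2]= {2, 3};
      Index_stats_sketch stats[2];

      unsigned long *p= pool;
      for (int k= 0; k < 2; k++)
      {
        stats[k].avg_frequency= p;  /* init_avg_frequency(p) */
        p+= ext_key_parts[k];
      }
      /* slice offsets into the pool: 0 and 2 */
      printf("%d %d\n", (int) (stats[0].avg_frequency - pool),
                        (int) (stats[1].avg_frequency - pool));
      return 0;
    }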
+
+
+/**
+ @brief
+ Check whether any persistent statistics are needed for the processed command
+
+ @param
+ thd The thread handle
+
+ @details
+ The function checks whether any persistent statistics for the processed
+ command need to be read.
+
+ @retval
+ TRUE Statistics need to be read
+ @retval
+ FALSE Otherwise
+*/
+
+static
+inline bool statistics_for_command_is_needed(THD *thd)
+{
+ if (thd->bootstrap || thd->variables.use_stat_tables == NEVER)
+ return FALSE;
+
+ switch(thd->lex->sql_command) {
+ case SQLCOM_SELECT:
+ case SQLCOM_INSERT:
+ case SQLCOM_INSERT_SELECT:
+ case SQLCOM_UPDATE:
+ case SQLCOM_UPDATE_MULTI:
+ case SQLCOM_DELETE:
+ case SQLCOM_DELETE_MULTI:
+ case SQLCOM_REPLACE:
+ case SQLCOM_REPLACE_SELECT:
+ break;
+ default:
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+
+/**
+ @brief
+ Allocate memory for the statistical data used by a table share
+
+ @param
+ thd Thread handler
+ @param
+ table_share Table share for which the memory for statistical data is allocated
+ @param
+ is_safe TRUE <-> at any time only one thread can perform the function
+
+ @note
+ The function allocates the memory for the statistical data on a table in the
+ table's share memory with the intention to read the statistics there from
+ the system persistent statistical tables mysql.table_stats,
+ mysql.column_stats, mysql.index_stats. The memory is allocated for the
+ statistics on the table, on the table's columns, and on the table's indexes.
+ The memory is allocated in the table_share's mem_root.
+ If the parameter is_safe is TRUE then it is guaranteed that at any given time
+ only one thread executes the code of the function.
+
+ @retval
+ 0 If the memory for all statistical data has been successfully allocated
+ @retval
+ 1 Otherwise
+
+ @note
+ The situation when more than one thread tries to allocate memory for
+ statistical data is rare. It happens under the following scenario:
+ 1. One thread executes a query over table t with the system variable
+ 'use_stat_tables' set to 'never'.
+ 2. After this the second thread sets 'use_stat_tables' to 'preferably'
+ and executes a query over table t.
+ 3. Simultaneously the third thread sets 'use_stat_tables' to 'preferably'
+ and executes a query over table t.
+ Here the second and the third threads try to allocate the memory for
+ statistical data at the same time. The precautions are taken to
+ guarantee the correctness of the allocation.
+
+ @note
+ Currently the function is always called with the parameter is_safe set
+ to FALSE.
+
+*/
+
+int alloc_statistics_for_table_share(THD* thd, TABLE_SHARE *table_share,
+ bool is_safe)
+{
+
+ Field **field_ptr;
+ KEY *key_info, *end;
+ TABLE_STATISTICS_CB *stats_cb= &table_share->stats_cb;
+
+ DBUG_ENTER("alloc_statistics_for_table_share");
+
+ DEBUG_SYNC(thd, "statistics_mem_alloc_start1");
+ DEBUG_SYNC(thd, "statistics_mem_alloc_start2");
+
+ if (!statistics_for_command_is_needed(thd))
+ DBUG_RETURN(1);
+
+ if (!is_safe)
+ mysql_mutex_lock(&table_share->LOCK_ha_data);
+
+ if (stats_cb->stats_can_be_read)
+ {
+ if (!is_safe)
+ mysql_mutex_unlock(&table_share->LOCK_ha_data);
+ DBUG_RETURN(0);
+ }
+
+ Table_statistics *table_stats= stats_cb->table_stats;
+ if (!table_stats)
+ {
+ table_stats= (Table_statistics *) alloc_root(&stats_cb->mem_root,
+ sizeof(Table_statistics));
+ if (!table_stats)
+ {
+ if (!is_safe)
+ mysql_mutex_unlock(&table_share->LOCK_ha_data);
+ DBUG_RETURN(1);
+ }
+ memset(table_stats, 0, sizeof(Table_statistics));
+ stats_cb->table_stats= table_stats;
+ }
+
+ uint fields= table_share->fields;
+ Column_statistics *column_stats= table_stats->column_stats;
+ if (!column_stats)
+ {
+ column_stats= (Column_statistics *) alloc_root(&stats_cb->mem_root,
+ sizeof(Column_statistics) *
+ (fields+1));
+ if (column_stats)
+ {
+ memset(column_stats, 0, sizeof(Column_statistics) * (fields+1));
+ table_stats->column_stats= column_stats;
+ for (field_ptr= table_share->field;
+ *field_ptr;
+ field_ptr++, column_stats++)
+ {
+ (*field_ptr)->read_stats= column_stats;
+ (*field_ptr)->read_stats->min_value= NULL;
+ (*field_ptr)->read_stats->max_value= NULL;
+ }
+ create_min_max_statistical_fields_for_table_share(thd, table_share);
+ }
+ }
+
+ uint keys= table_share->keys;
+ Index_statistics *index_stats= table_stats->index_stats;
+ if (!index_stats)
+ {
+ index_stats= (Index_statistics *) alloc_root(&stats_cb->mem_root,
+ sizeof(Index_statistics) *
+ keys);
+ if (index_stats)
+ {
+ table_stats->index_stats= index_stats;
+ for (key_info= table_share->key_info, end= key_info + keys;
+ key_info < end;
+ key_info++, index_stats++)
+ {
+ key_info->read_stats= index_stats;
+ }
+ }
+ }
+
+ uint key_parts= table_share->ext_key_parts;
+ ulong *idx_avg_frequency= table_stats->idx_avg_frequency;
+ if (!idx_avg_frequency)
+ {
+ idx_avg_frequency= (ulong*) alloc_root(&stats_cb->mem_root,
+ sizeof(ulong) * key_parts);
+ if (idx_avg_frequency)
+ {
+ memset(idx_avg_frequency, 0, sizeof(ulong) * key_parts);
+ table_stats->idx_avg_frequency= idx_avg_frequency;
+ for (key_info= table_share->key_info, end= key_info + keys;
+ key_info < end;
+ key_info++)
+ {
+ key_info->read_stats->init_avg_frequency(idx_avg_frequency);
+ idx_avg_frequency+= key_info->ext_key_parts;
+ }
+ }
+ }
+
+ if (column_stats && index_stats && idx_avg_frequency)
+ stats_cb->stats_can_be_read= TRUE;
+
+ if (!is_safe)
+ mysql_mutex_unlock(&table_share->LOCK_ha_data);
+
+
+ DBUG_RETURN(0);
+}
+
+
+/**
+ @brief
+ Initialize the aggregation fields to collect statistics on a column
+
+ @param
+ thd Thread handler
+ @param
+ table_field Column to collect statistics for
+*/
+
+inline
+void Column_statistics_collected::init(THD *thd, Field *table_field)
+{
+ uint max_heap_table_size= thd->variables.max_heap_table_size;
+ TABLE *table= table_field->table;
+ uint pk= table->s->primary_key;
+
+ is_single_pk_col= FALSE;
+
+ if (pk != MAX_KEY && table->key_info[pk].key_parts == 1 &&
+ table->key_info[pk].key_part[0].fieldnr == table_field->field_index + 1)
+ is_single_pk_col= TRUE;
+
+ column= table_field;
+
+ set_all_nulls();
+
+ nulls= 0;
+ column_total_length= 0;
+ if (is_single_pk_col || (table_field->flags & BLOB_FLAG))
+ count_distinct= NULL;
+ else
+ {
+ count_distinct=
+ table_field->type() == MYSQL_TYPE_BIT ?
+ new Count_distinct_field_bit(table_field, max_heap_table_size) :
+ new Count_distinct_field(table_field, max_heap_table_size);
+ }
+ if (count_distinct && !count_distinct->exists())
+ count_distinct= NULL;
+}
+
+
+/**
+ @brief
+ Perform aggregation for a row when collecting statistics on a column
+
+ @param
+ rowno The order number of the row
+*/
+
+inline
+void Column_statistics_collected::add(ha_rows rowno)
+{
+
+ if (column->is_null())
+ nulls++;
+ else
+ {
+ column_total_length+= column->value_length();
+ if (min_value && column->update_min(min_value, rowno == nulls))
+ set_not_null(COLUMN_STAT_MIN_VALUE);
+ if (max_value && column->update_max(max_value, rowno == nulls))
+ set_not_null(COLUMN_STAT_MAX_VALUE);
+ if (count_distinct)
+ count_distinct->add();
+ }
+}
+
+
+/**
+ @brief
+ Get the results of aggregation when collecting the statistics on a column
+
+ @param
+ rows The total number of rows in the table
+*/
+
+inline
+void Column_statistics_collected::finish(ha_rows rows)
+{
+ double val;
+
+ if (rows)
+ {
+ val= (double) nulls / rows;
+ set_nulls_ratio(val);
+ set_not_null(COLUMN_STAT_NULLS_RATIO);
+ }
+ if (rows - nulls)
+ {
+ val= (double) column_total_length / (rows - nulls);
+ set_avg_length(val);
+ set_not_null(COLUMN_STAT_AVG_LENGTH);
+ }
+ if (count_distinct)
+ {
+ ulonglong distincts= count_distinct->get_value();
+ if (distincts)
+ {
+ val= (double) (rows - nulls) / distincts;
+ set_avg_frequency(val);
+ set_not_null(COLUMN_STAT_AVG_FREQUENCY);
+ }
+ delete count_distinct;
+ count_distinct= NULL;
+ }
+ else if (is_single_pk_col)
+ {
+ val= 1.0;
+ set_avg_frequency(val);
+ set_not_null(COLUMN_STAT_AVG_FREQUENCY);
+ }
+}
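A quick worked example (hypothetical totals, not from the patch) of the arithmetic in finish() above:

    #include <cstdio>

    int main()
    {
      unsigned long rows= 1000, nulls= 100;
      unsigned long long column_total_length= 3600, distincts= 300;

      printf("nulls_ratio=   %g\n", (double) nulls / rows);               /* 0.1 */
      printf("avg_length=    %g\n",
             (double) column_total_length / (rows - nulls));              /* 4.0 */
      printf("avg_frequency= %g\n", (double) (rows - nulls) / distincts); /* 3.0 */
      return 0;
    }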
+
+
+/**
+ @brief
+ Clean up auxiliary structures used for aggregation
+*/
+
+inline
+void Column_statistics_collected::cleanup()
+{
+ if (count_distinct)
+ {
+ delete count_distinct;
+ count_distinct= NULL;
+ }
+}
+
+
+/**
+ @brief
+ Collect statistical data on an index
+
+ @param
+ table The table the index belongs to
+ index The number of this index in the table
+
+ @details
+ The function collects the value of 'avg_frequency' for the prefixes
+ on an index from 'table'. The index is specified by its number.
+ If the scan is successful the calculated statistics is saved in the
+ elements of the array collected_stats->avg_frequency of the KEY structure
+ for the index. The statistics for the prefix with k components is saved
+ in the element number k-1.
+
+ @retval
+ 0 If the statistics has been successfully collected
+ @retval
+ 1 Otherwise
+
+ @note
+ The function collects statistics for the index prefixes for one index
+ scan during which no data is fetched from the table records. That's why
+ statistical data for prefixes that contain part of a field is not
+ collected.
+ The function employs an object of the helper class Index_prefix_calc to
+ count for each index prefix the number of index entries without nulls and
+ the number of distinct entries among them.
+
+*/
+
+static
+int collect_statistics_for_index(THD *thd, TABLE *table, uint index)
+{
+ int rc= 0;
+ KEY *key_info= &table->key_info[index];
+ ha_rows rows= 0;
+ Index_prefix_calc index_prefix_calc(table, key_info);
+ DBUG_ENTER("collect_statistics_for_index");
+
+ DEBUG_SYNC(table->in_use, "statistics_collection_start1");
+ DEBUG_SYNC(table->in_use, "statistics_collection_start2");
+
+ if (index_prefix_calc.is_single_comp_pk)
+ {
+ index_prefix_calc.get_avg_frequency();
+ DBUG_RETURN(rc);
+ }
+
+ table->key_read= 1;
+ table->file->extra(HA_EXTRA_KEYREAD);
+
+ table->file->ha_index_init(index, TRUE);
+ rc= table->file->ha_index_first(table->record[0]);
+ while (rc != HA_ERR_END_OF_FILE)
+ {
+ if (thd->killed)
+ break;
+
+ if (rc)
+ break;
+ rows++;
+ index_prefix_calc.add();
+ rc= table->file->ha_index_next(table->record[0]);
+ }
+ table->key_read= 0;
+ table->file->ha_index_end();
+
+ rc= (rc == HA_ERR_END_OF_FILE && !thd->killed) ? 0 : 1;
+
+ if (!rc)
+ index_prefix_calc.get_avg_frequency();
+
+ DBUG_RETURN(rc);
+}
+
+
+/**
+ @brief
+ Collect statistical data for a table
+
+ @param
+ thd The thread handle
+ @param
+ table The table to collect statistics on
+
+ @details
+ The function collects data for various statistical characteristics on
+ the table 'table'. This data is saved in internal fields that can
+ be reached from 'table'. The data is prepared to be saved in the persistent
+ statistical table by the function update_statistics_for_table.
+ The collected statistical values are not placed in the same fields that
+ keep the statistical data used by the optimizer. Therefore, at any time,
+ there is no collision between the statistics being collected and the one
+ used by the optimizer to look for optimal query execution plans for other
+ clients.
+
+ @retval
+ 0 If the statistics has been successfully collected
+ @retval
+ 1 Otherwise
+
+ @note
+ The function first collects statistical data for statistical characteristics
+ to be saved in the statistical tables table_stats and column_stats. To do this
+ it performs a full table scan of 'table'. During this scan the function
+ collects statistics on each column of the table and counts the total number of
+ scanned rows. To calculate the value of 'avg_frequency' for a column the
+ function constructs an object of the helper class Count_distinct_field
+ (or its derivation). Currently this class cannot count the number of
+ distinct values for blob columns. So the value of 'avg_frequency' for
+ blob columns is always null.
+ After the full table scan the function calls collect_statistics_for_index
+ for each table index. The latter performs full index scan for each index.
+
+ @note
+ Currently the statistical data is collected indiscriminately for all
+ columns/indexes of 'table', for all statistical characteristics.
+ TODO. Collect only specified statistical characteristics for specified
+ columns/indexes.
+
+ @note
+ Currently the process of collecting statistical data is not optimized.
+ For example, 'avg_frequency' for a column could be copied from the
+ 'avg_frequency' collected for an index if this column is used as the
+ first component of the index. Min and max values for this column could
+ be extracted from the index as well.
+*/
+
+int collect_statistics_for_table(THD *thd, TABLE *table)
+{
+ int rc;
+ Field **field_ptr;
+ Field *table_field;
+ ha_rows rows= 0;
+ handler *file=table->file;
+
+ DBUG_ENTER("collect_statistics_for_table");
+
+ table->collected_stats->cardinality_is_null= TRUE;
+ table->collected_stats->cardinality= 0;
+
+ for (field_ptr= table->field; *field_ptr; field_ptr++)
+ {
+ table_field= *field_ptr;
+ if (!bitmap_is_set(table->read_set, table_field->field_index))
+ continue;
+ table_field->collected_stats->init(thd, table_field);
+ }
+
+ /* Perform a full table scan to collect statistics on 'table's columns */
+ if (!(rc= file->ha_rnd_init(TRUE)))
+ {
+ DEBUG_SYNC(table->in_use, "statistics_collection_start");
+
+ while ((rc= file->ha_rnd_next(table->record[0])) != HA_ERR_END_OF_FILE)
+ {
+ if (thd->killed)
+ break;
+
+ if (rc)
+ {
+ if (rc == HA_ERR_RECORD_DELETED)
+ continue;
+ break;
+ }
+
+ for (field_ptr= table->field; *field_ptr; field_ptr++)
+ {
+ table_field= *field_ptr;
+ if (!bitmap_is_set(table->read_set, table_field->field_index))
+ continue;
+ table_field->collected_stats->add(rows);
+ }
+ rows++;
+ }
+ file->ha_rnd_end();
+ }
+ rc= (rc == HA_ERR_END_OF_FILE && !thd->killed) ? 0 : 1;
+
+ /*
+ Calculate the values of all statistical characteristics on columns and,
+ for each field f of 'table', save them in the collected_stats structure
+ of the Field object for f.
+ */
+ if (!rc)
+ {
+ table->collected_stats->cardinality_is_null= FALSE;
+ table->collected_stats->cardinality= rows;
+ }
+
+ for (field_ptr= table->field; *field_ptr; field_ptr++)
+ {
+ table_field= *field_ptr;
+ if (!bitmap_is_set(table->read_set, table_field->field_index))
+ continue;
+ if (!rc)
+ table_field->collected_stats->finish(rows);
+ else
+ table_field->collected_stats->cleanup();
+ }
+
+ if (!rc)
+ {
+ uint key;
+ key_map::Iterator it(table->keys_in_use_for_query);
+
+ MY_BITMAP *save_read_set= table->read_set;
+ table->read_set= &table->tmp_set;
+ bitmap_set_all(table->read_set);
+
+ /* Collect statistics for indexes */
+ while ((key= it++) != key_map::Iterator::BITMAP_END)
+ {
+ if ((rc= collect_statistics_for_index(thd, table, key)))
+ break;
+ }
+
+ table->read_set= save_read_set;
+ }
+
+ DBUG_RETURN(rc);
+}
+
+
+/**
+ @brief
+ Update statistics for a table in the persistent statistical tables
+
+ @param
+ thd The thread handle
+ @param
+ table The table to collect statistics on
+
+ @details
+ For each statistical table st the function looks for the rows from this
+ table that contain statistical data on 'table'. If rows with given
+ statistical characteristics exist they are updated with the new statistical
+ values taken from internal structures for 'table'. Otherwise new rows
+ with these statistical characteristics are added into st.
+ It is assumed that the values to be stored in the statistical tables
+ have been collected and saved by the function collect_statistics_for_table.
+
+ @retval
+ 0 If all statistical tables have been successfully updated
+ @retval
+ 1 Otherwise
+
+ @note
+ The function is called when executing the ANALYZE actions for 'table'.
+ The function first unlocks the opened table the statistics on which have
+ been collected, but does not close it, so all collected statistical data
+ remains in internal structures for 'table'. Then the function opens the
+ statistical tables and writes the statistical data for 'table' into them.
+ It is not allowed just to open statistical tables for writing when some
+ other tables are locked for reading.
+ After the statistical tables have been opened they are updated one by one
+ with the new statistics on 'table'. Objects of the helper classes
+ Table_stat, Column_stat and Index_stat are employed for this.
+ After having been updated the statistical system tables are closed.
+*/
+
+int update_statistics_for_table(THD *thd, TABLE *table)
+{
+ TABLE_LIST tables[STATISTICS_TABLES];
+ Open_tables_backup open_tables_backup;
+ uint i;
+ int err;
+ enum_binlog_format save_binlog_format;
+ int rc= 0;
+ TABLE *stat_table;
+
+ DBUG_ENTER("update_statistics_for_table");
+
+ DEBUG_SYNC(thd, "statistics_update_start");
+
+ if (open_stat_tables(thd, tables, &open_tables_backup, TRUE))
+ {
+ thd->clear_error();
+ DBUG_RETURN(rc);
+ }
+
+ save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
+
+ /* Update the statistical table table_stats */
+ stat_table= tables[TABLE_STAT].table;
+ Table_stat table_stat(stat_table, table);
+ restore_record(stat_table, s->default_values);
+ table_stat.set_key_fields();
+ err= table_stat.update_stat();
+ if (err)
+ rc= 1;
+
+ /* Update the statistical table column_stats */
+ stat_table= tables[COLUMN_STAT].table;
+ Column_stat column_stat(stat_table, table);
+ for (Field **field_ptr= table->field; *field_ptr; field_ptr++)
+ {
+ Field *table_field= *field_ptr;
+ if (!bitmap_is_set(table->read_set, table_field->field_index))
+ continue;
+ restore_record(stat_table, s->default_values);
+ column_stat.set_key_fields(table_field);
+ err= column_stat.update_stat();
+ if (err && !rc)
+ rc= 1;
+ }
+
+ /* Update the statistical table index_stats */
+ stat_table= tables[INDEX_STAT].table;
+ uint key;
+ key_map::Iterator it(table->keys_in_use_for_query);
+ Index_stat index_stat(stat_table, table);
+
+ while ((key= it++) != key_map::Iterator::BITMAP_END)
+ {
+ KEY *key_info= table->key_info+key;
+ uint key_parts= table->actual_n_key_parts(key_info);
+ for (i= 0; i < key_parts; i++)
+ {
+ restore_record(stat_table, s->default_values);
+ index_stat.set_key_fields(key_info, i+1);
+ err= index_stat.update_stat();
+ if (err && !rc)
+ rc= 1;
+ }
+ }
+
+ thd->restore_stmt_binlog_format(save_binlog_format);
+
+ close_system_tables(thd, &open_tables_backup);
+
+ DBUG_RETURN(rc);
+}
+
+
+/**
+ @brief
+ Read statistics for a table from the persistent statistical tables
+
+ @param
+ thd The thread handle
+ @param
+ table The table to read statistics on
+ @param
+ stat_tables The array of TABLE_LIST objects for statistical tables
+
+ @details
+ For each statistical table the function looks for the rows from this
+ table that contain statistical data on 'table'. If such rows are found,
+ the data from their statistical columns is read into the appropriate
+ fields of internal structures for 'table'. Later, during query processing,
+ this data is supposed to be used by the optimizer.
+ The parameter stat_tables should point to an array of TABLE_LIST
+ objects for all statistical tables linked into a list. All statistical
+ tables are supposed to be opened.
+ The function is called by read_statistics_for_tables_if_needed().
+
+ @retval
+ 0 If data has been successfully read for the table
+ @retval
+ 1 Otherwise
+
+ @note
+ Objects of the helper classes Table_stat, Column_stat and Index_stat
+ are employed to read statistical data from the statistical tables.
+*/
+
+static
+int read_statistics_for_table(THD *thd, TABLE *table, TABLE_LIST *stat_tables)
+{
+ uint i;
+ TABLE *stat_table;
+ Field *table_field;
+ Field **field_ptr;
+ KEY *key_info, *key_info_end;
+ TABLE_SHARE *table_share= table->s;
+
+ DBUG_ENTER("read_statistics_for_table");
+
+ /* Read statistics from the statistical table table_stats */
+ stat_table= stat_tables[TABLE_STAT].table;
+ Table_stat table_stat(stat_table, table);
+ table_stat.set_key_fields();
+ table_stat.get_stat_values();
+
+ /* Read statistics from the statistical table column_stats */
+ stat_table= stat_tables[COLUMN_STAT].table;
+ Column_stat column_stat(stat_table, table);
+ for (field_ptr= table_share->field; *field_ptr; field_ptr++)
+ {
+ table_field= *field_ptr;
+ column_stat.set_key_fields(table_field);
+ column_stat.get_stat_values();
+ }
+
+ /* Read statistics from the statistical table index_stats */
+ Table_statistics *read_stats= table_share->stats_cb.table_stats;
+ stat_table= stat_tables[INDEX_STAT].table;
+ Index_stat index_stat(stat_table, table);
+ for (key_info= table_share->key_info,
+ key_info_end= key_info + table_share->keys;
+ key_info < key_info_end; key_info++)
+ {
+ uint key_parts= key_info->ext_key_parts;
+ for (i= 0; i < key_parts; i++)
+ {
+ index_stat.set_key_fields(key_info, i+1);
+ index_stat.get_stat_values();
+ }
+
+ key_part_map ext_key_part_map= key_info->ext_key_part_map;
+ if (key_info->key_parts != key_info->ext_key_parts &&
+ key_info->read_stats->get_avg_frequency(key_info->key_parts) == 0)
+ {
+ KEY *pk_key_info= table_share->key_info + table_share->primary_key;
+ uint k= key_info->key_parts;
+ uint pk_parts= pk_key_info->key_parts;
+ ha_rows n_rows= read_stats->cardinality;
+ double k_dist= n_rows / key_info->read_stats->get_avg_frequency(k-1);
+ uint m= 0;
+ for (uint j= 0; j < pk_parts; j++)
+ {
+ if (!(ext_key_part_map & 1 << j))
+ {
+ for (uint l= k; l < k + m; l++)
+ {
+ double avg_frequency=
+ pk_key_info->read_stats->get_avg_frequency(j-1);
+ set_if_smaller(avg_frequency, 1);
+ double val= pk_key_info->read_stats->get_avg_frequency(j) /
+ avg_frequency;
+ key_info->read_stats->set_avg_frequency (l, val);
+ }
+ }
+ else
+ {
+ double avg_frequency= pk_key_info->read_stats->get_avg_frequency(j);
+ key_info->read_stats->set_avg_frequency(k + m, avg_frequency);
+ m++;
+ }
+ }
+ for (uint l= k; l < k + m; l++)
+ {
+ double avg_frequency= key_info->read_stats->get_avg_frequency(l);
+ if (avg_frequency == 0 || read_stats->cardinality_is_null)
+ avg_frequency= 1;
+ else if (avg_frequency > 1)
+ {
+ avg_frequency/= k_dist;
+ set_if_bigger(avg_frequency, 1);
+ }
+ key_info->read_stats->set_avg_frequency(l, avg_frequency);
+ }
+ }
+ }
+
+ table->stats_is_read= TRUE;
+
+ DBUG_RETURN(0);
+}
+
+
+/**
+ @brief
+ Check whether any statistics is to be read for tables from a table list
+
+ @param
+ thd The thread handle
+ @param
+ tables The tables list for whose tables the check is to be done
+
+ @details
+ The function checks whether, for any of the tables opened and locked for
+ a statement, statistics from statistical tables need to be read.
+
+ @retval
+ TRUE Statistics for any of the tables need to be read
+ @retval
+ FALSE Otherwise
+*/
+
+static
+bool statistics_for_tables_is_needed(THD *thd, TABLE_LIST *tables)
+{
+ if (!tables)
+ return FALSE;
+
+ if (!statistics_for_command_is_needed(thd))
+ return FALSE;
+
+ /*
+ Do not read statistics for any query over non-user tables.
+ If the query references some statistical tables, but not all
+ of them, reading the statistics may lead to a deadlock
+ */
+ for (TABLE_LIST *tl= tables; tl; tl= tl->next_global)
+ {
+ if (!tl->is_view_or_derived() && tl->table)
+ {
+ TABLE_SHARE *table_share= tl->table->s;
+ if (table_share &&
+ (table_share->table_category != TABLE_CATEGORY_USER ||
+ table_share->tmp_table != NO_TMP_TABLE))
+ return FALSE;
+ }
+ }
+
+ for (TABLE_LIST *tl= tables; tl; tl= tl->next_global)
+ {
+ if (!tl->is_view_or_derived() && tl->table)
+ {
+ TABLE_SHARE *table_share= tl->table->s;
+ if (table_share)
+ {
+ if (table_share->stats_cb.stats_can_be_read &&
+ !table_share->stats_cb.stats_is_read)
+ return TRUE;
+ if (table_share->stats_cb.stats_is_read)
+ tl->table->stats_is_read= TRUE;
+ }
+ }
+ }
+
+ return FALSE;
+}
+
+
+/**
+ @brief
+ Read statistics for tables from a table list if it is needed
+
+ @param
+ thd The thread handle
+ @param
+ tables The tables list for whose tables to read statistics
+
+ @details
+ The function first checks whether, for any of the tables opened and locked
+ for a statement, statistics from statistical tables need to be read.
+ Then, if so, it opens system statistical tables for read and reads
+ the statistical data from them for those tables from the list for which it
+ makes sense. Then the function closes system statistical tables.
+
+ @retval
+ 0 Statistics for tables was successfully read
+ @retval
+ 1 Otherwise
+*/
+
+int read_statistics_for_tables_if_needed(THD *thd, TABLE_LIST *tables)
+{
+ TABLE_LIST stat_tables[STATISTICS_TABLES];
+ Open_tables_backup open_tables_backup;
+
+ DBUG_ENTER("read_statistics_for_table_if_needed");
+
+ DEBUG_SYNC(thd, "statistics_read_start");
+
+ if (!statistics_for_tables_is_needed(thd, tables))
+ DBUG_RETURN(0);
+
+ if (open_stat_tables(thd, stat_tables, &open_tables_backup, FALSE))
+ {
+ thd->clear_error();
+ DBUG_RETURN(1);
+ }
+
+ for (TABLE_LIST *tl= tables; tl; tl= tl->next_global)
+ {
+ if (!tl->is_view_or_derived() && tl->table)
+ {
+ TABLE_SHARE *table_share= tl->table->s;
+ if (table_share)
+ {
+ if (table_share->stats_cb.stats_can_be_read &&
+ !table_share->stats_cb.stats_is_read)
+ {
+ (void) read_statistics_for_table(thd, tl->table, stat_tables);
+ table_share->stats_cb.stats_is_read= TRUE;
+ }
+ if (table_share->stats_cb.stats_is_read)
+ tl->table->stats_is_read= TRUE;
+ }
+ }
+ }
+
+ close_system_tables(thd, &open_tables_backup);
+
+ DBUG_RETURN(0);
+}
+
+
+/**
+ @brief
+ Delete statistics on a table from all statistical tables
+
+ @param
+ thd The thread handle
+ @param
+ db The name of the database the table belongs to
+ @param
+ tab The name of the table whose statistics is to be deleted
+
+ @details
+ The function deletes statistics on the table called 'tab' of the database
+ 'db' from all statistical tables: table_stats, column_stats, index_stats.
+
+ @retval
+ 0 If all deletions are successful
+ @retval
+ 1 Otherwise
+
+ @note
+ The function is called when executing the statement DROP TABLE 'tab'.
+*/
+
+int delete_statistics_for_table(THD *thd, LEX_STRING *db, LEX_STRING *tab)
+{
+ int err;
+ enum_binlog_format save_binlog_format;
+ TABLE *stat_table;
+ TABLE_LIST tables[STATISTICS_TABLES];
+ Open_tables_backup open_tables_backup;
+ int rc= 0;
+
+ DBUG_ENTER("delete_statistics_for_table");
+
+ if (open_stat_tables(thd, tables, &open_tables_backup, TRUE))
+ {
+ thd->clear_error();
+ DBUG_RETURN(rc);
+ }
+
+ save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
+
+ /* Delete statistics on table from the statistical table index_stats */
+ stat_table= tables[INDEX_STAT].table;
+ Index_stat index_stat(stat_table, db, tab);
+ index_stat.set_full_table_name();
+ while (index_stat.find_next_stat_for_prefix(2))
+ {
+ err= index_stat.delete_stat();
+ if (err && !rc)
+ rc= 1;
+ }
+
+ /* Delete statistics on table from the statistical table column_stats */
+ stat_table= tables[COLUMN_STAT].table;
+ Column_stat column_stat(stat_table, db, tab);
+ column_stat.set_full_table_name();
+ while (column_stat.find_next_stat_for_prefix(2))
+ {
+ err= column_stat.delete_stat();
+ if (err && !rc)
+ rc= 1;
+ }
+
+ /* Delete statistics on table from the statistical table table_stats */
+ stat_table= tables[TABLE_STAT].table;
+ Table_stat table_stat(stat_table, db, tab);
+ table_stat.set_key_fields();
+ if (table_stat.find_stat())
+ {
+ err= table_stat.delete_stat();
+ if (err && !rc)
+ rc= 1;
+ }
+
+ thd->restore_stmt_binlog_format(save_binlog_format);
+
+ close_system_tables(thd, &open_tables_backup);
+
+ DBUG_RETURN(rc);
+}
+
+
+/**
+ @brief
+ Delete statistics on a column of the specified table
+
+ @param
+ thd The thread handle
+ @param
+ tab The table the column belongs to
+ @param
+ col The field of the column whose statistics is to be deleted
+
+ @details
+ The function deletes statistics on the column 'col' belonging to the table
+ 'tab' from the statistical table column_stats.
+
+ @retval
+ 0 If the deletion is successful
+ @retval
+ 1 Otherwise
+
+ @note
+ The function is called when dropping a table column or when changing
+ the definition of this column.
+*/
+
+int delete_statistics_for_column(THD *thd, TABLE *tab, Field *col)
+{
+ int err;
+ enum_binlog_format save_binlog_format;
+ TABLE *stat_table;
+ TABLE_LIST tables;
+ Open_tables_backup open_tables_backup;
+ int rc= 0;
+
+ DBUG_ENTER("delete_statistics_for_column");
+
+ if (open_single_stat_table(thd, &tables, &stat_table_name[1],
+ &open_tables_backup, TRUE))
+ {
+ thd->clear_error();
+ DBUG_RETURN(rc);
+ }
+
+ save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
+
+ stat_table= tables.table;
+ Column_stat column_stat(stat_table, tab);
+ column_stat.set_key_fields(col);
+ if (column_stat.find_stat())
+ {
+ err= column_stat.delete_stat();
+ if (err)
+ rc= 1;
+ }
+
+ thd->restore_stmt_binlog_format(save_binlog_format);
+
+ close_system_tables(thd, &open_tables_backup);
+
+ DBUG_RETURN(rc);
+}
+
+
+/**
+ @brief
+ Delete statistics on an index of the specified table
+
+ @param
+ thd The thread handle
+ @param
+ tab The table the index belongs to
+ @param
+ key_info The descriptor of the index whose statistics is to be deleted
+ @param
+ ext_prefixes_only Delete statistics only on the index prefixes extended by
+ the components of the primary key
+
+ @details
+ The function deletes statistics on the index specified by 'key_info'
+ defined on the table 'tab' from the statistical table index_stats.
+
+ @retval
+ 0 If the deletion is successful
+ @retval
+ 1 Otherwise
+
+ @note
+ The function is called when dropping an index, or dropping/changing the
+ definition of a column used in the definition of the index.
+*/
+
+int delete_statistics_for_index(THD *thd, TABLE *tab, KEY *key_info,
+ bool ext_prefixes_only)
+{
+ int err;
+ enum_binlog_format save_binlog_format;
+ TABLE *stat_table;
+ TABLE_LIST tables;
+ Open_tables_backup open_tables_backup;
+ int rc= 0;
+
+ DBUG_ENTER("delete_statistics_for_index");
+
+ if (open_single_stat_table(thd, &tables, &stat_table_name[2],
+ &open_tables_backup, TRUE))
+ {
+ thd->clear_error();
+ DBUG_RETURN(rc);
+ }
+
+ save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
+
+ stat_table= tables.table;
+ Index_stat index_stat(stat_table, tab);
+ if (!ext_prefixes_only)
+ {
+ index_stat.set_index_prefix_key_fields(key_info);
+ while (index_stat.find_next_stat_for_prefix(3))
+ {
+ err= index_stat.delete_stat();
+ if (err && !rc)
+ rc= 1;
+ }
+ }
+ else
+ {
+ for (uint i= key_info->key_parts; i < key_info->ext_key_parts; i++)
+ {
+ index_stat.set_key_fields(key_info, i+1);
+ if (index_stat.find_next_stat_for_prefix(4))
+ {
+ err= index_stat.delete_stat();
+ if (err && !rc)
+ rc= 1;
+ }
+ }
+ }
+
+ thd->restore_stmt_binlog_format(save_binlog_format);
+
+ close_system_tables(thd, &open_tables_backup);
+
+ DBUG_RETURN(rc);
+}
+
+
+/**
+ @brief
+ Rename a table in all statistical tables
+
+ @param
+ thd The thread handle
+ @param
+ db The name of the database the table belongs to
+ @param
+ tab The name of the table to be renamed in statistical tables
+ @param
+ new_db The new name of the database the table belongs to
+ @param
+ new_tab The new name of the table
+
+ @details
+ The function replaces the name of the table 'tab' from the database 'db'
+ with 'new_tab' in all statistical tables: table_stats, column_stats,
+ index_stats.
+
+ @retval
+ 0 If all updates of the table name are successful
+ @retval
+ 1 Otherwise
+
+ @note
+ The function is called when executing any statement that renames a table.
+*/
+
+int rename_table_in_stat_tables(THD *thd, LEX_STRING *db, LEX_STRING *tab,
+ LEX_STRING *new_db, LEX_STRING *new_tab)
+{
+ int err;
+ enum_binlog_format save_binlog_format;
+ TABLE *stat_table;
+ TABLE_LIST tables[STATISTICS_TABLES];
+ Open_tables_backup open_tables_backup;
+ int rc= 0;
+
+ DBUG_ENTER("rename_table_in_stat_tables");
+
+ if (open_stat_tables(thd, tables, &open_tables_backup, TRUE))
+ {
+ thd->clear_error();
+ DBUG_RETURN(rc);
+ }
+
+ save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
+
+ /* Rename table in the statistical table index_stats */
+ stat_table= tables[INDEX_STAT].table;
+ Index_stat index_stat(stat_table, db, tab);
+ index_stat.set_full_table_name();
+ while (index_stat.find_next_stat_for_prefix(2))
+ {
+ err= index_stat.update_table_name_key_parts(new_db, new_tab);
+ if (err && !rc)
+ rc= 1;
+ index_stat.set_full_table_name();
+ }
+
+ /* Rename table in the statistical table column_stats */
+ stat_table= tables[COLUMN_STAT].table;
+ Column_stat column_stat(stat_table, db, tab);
+ column_stat.set_full_table_name();
+ while (column_stat.find_next_stat_for_prefix(2))
+ {
+ err= column_stat.update_table_name_key_parts(new_db, new_tab);
+ if (err && !rc)
+ rc= 1;
+ column_stat.set_full_table_name();
+ }
+
+ /* Rename table in the statistical table table_stats */
+ stat_table= tables[TABLE_STAT].table;
+ Table_stat table_stat(stat_table, db, tab);
+ table_stat.set_key_fields();
+ if (table_stat.find_stat())
+ {
+ err= table_stat.update_table_name_key_parts(new_db, new_tab);
+ if (err && !rc)
+ rc= 1;
+ }
+
+ thd->restore_stmt_binlog_format(save_binlog_format);
+
+ close_system_tables(thd, &open_tables_backup);
+
+ DBUG_RETURN(rc);
+}
+
+
+/**
+ @brief
+ Rename a column in the statistical table column_stats
+
+ @param
+ thd The thread handle
+ @param
+ tab The table the column belongs to
+ @param
+ col The column to be renamed
+ @param
+ new_name The new column name
+
+ @details
+ The function replaces the name of the column 'col' belonging to the table
+ 'tab' with 'new_name' in the statistical table column_stats.
+
+ @retval
+ 0 If the update of the column name is successful
+ @retval
+ 1 Otherwise
+
+ @note
+ The function is called when executing any statement that renames a column,
+ but does not change the column definition.
+*/
+
+int rename_column_in_stat_tables(THD *thd, TABLE *tab, Field *col,
+ const char *new_name)
+{
+ int err;
+ enum_binlog_format save_binlog_format;
+ TABLE *stat_table;
+ TABLE_LIST tables;
+ Open_tables_backup open_tables_backup;
+ int rc= 0;
+
+ DBUG_ENTER("rename_column_in_stat_tables");
+
+ if (open_single_stat_table(thd, &tables, &stat_table_name[1],
+ &open_tables_backup, TRUE))
+ {
+ thd->clear_error();
+ DBUG_RETURN(rc);
+ }
+
+ save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
+
+ /* Rename the column in the statistical table column_stats */
+ stat_table= tables.table;
+ Column_stat column_stat(stat_table, tab);
+ column_stat.set_key_fields(col);
+ if (column_stat.find_stat())
+ {
+ err= column_stat.update_column_key_part(new_name);
+ if (err && !rc)
+ rc= 1;
+ }
+
+ thd->restore_stmt_binlog_format(save_binlog_format);
+
+ close_system_tables(thd, &open_tables_backup);
+
+ DBUG_RETURN(rc);
+}
+
+
+/**
+ @brief
+ Set statistics for a table that will be used by the optimizer
+
+ @param
+ thd The thread handle
+ @param
+ table The table to set statistics for
+
+ @details
+ Depending on the value of thd->variables.use_stat_tables
+ the function sets up the table so as to control where the statistical
+ data used by the optimizer is to be taken from.
+*/
+
+void set_statistics_for_table(THD *thd, TABLE *table)
+{
+ TABLE_STATISTICS_CB *stats_cb= &table->s->stats_cb;
+ Table_statistics *read_stats= stats_cb->table_stats;
+ Use_stat_tables_mode use_stat_table_mode= get_use_stat_tables_mode(thd);
+ table->used_stat_records=
+ (use_stat_table_mode <= COMPLEMENTARY ||
+ !table->stats_is_read || read_stats->cardinality_is_null) ?
+ table->file->stats.records : read_stats->cardinality;
+ KEY *key_info, *key_info_end;
+ for (key_info= table->key_info, key_info_end= key_info+table->s->keys;
+ key_info < key_info_end; key_info++)
+ {
+ key_info->is_statistics_from_stat_tables=
+ (use_stat_table_mode > COMPLEMENTARY &&
+ table->stats_is_read &&
+ key_info->read_stats->avg_frequency_is_inited() &&
+ key_info->read_stats->get_avg_frequency(0) > 0.5);
+ }
+}
diff --git a/sql/sql_statistics.h b/sql/sql_statistics.h
new file mode 100644
index 00000000000..17f22cec4e5
--- /dev/null
+++ b/sql/sql_statistics.h
@@ -0,0 +1,248 @@
+/* Copyright 2006-2008 MySQL AB, 2008 Sun Microsystems, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#ifndef SQL_STATISTICS_H
+#define SQL_STATISTICS_H
+
+/*
+ These enumeration types comprise the dictionary of the three
+ statistical tables table_stats, column_stats and index_stats
+ as they are defined in ../scripts/mysql_system_tables.sql.
+
+ It would be nice if the declarations of these types were
+ generated automatically by the table definitions.
+*/
+
+typedef
+enum enum_use_stat_tables_mode
+{
+ NEVER,
+ COMPLEMENTARY,
+ PREFERABLY,
+} Use_stat_tables_mode;
+
+enum enum_stat_tables
+{
+ TABLE_STAT,
+ COLUMN_STAT,
+ INDEX_STAT,
+};
+
+enum enum_table_stat_col
+{
+ TABLE_STAT_DB_NAME,
+ TABLE_STAT_TABLE_NAME,
+ TABLE_STAT_CARDINALITY
+};
+
+enum enum_column_stat_col
+{
+ COLUMN_STAT_DB_NAME,
+ COLUMN_STAT_TABLE_NAME,
+ COLUMN_STAT_COLUMN_NAME,
+ COLUMN_STAT_MIN_VALUE,
+ COLUMN_STAT_MAX_VALUE,
+ COLUMN_STAT_NULLS_RATIO,
+ COLUMN_STAT_AVG_LENGTH,
+ COLUMN_STAT_AVG_FREQUENCY
+};
+
+enum enum_index_stat_col
+{
+ INDEX_STAT_DB_NAME,
+ INDEX_STAT_TABLE_NAME,
+ INDEX_STAT_INDEX_NAME,
+ INDEX_STAT_PREFIX_ARITY,
+ INDEX_STAT_AVG_FREQUENCY
+};
+
+inline
+Use_stat_tables_mode get_use_stat_tables_mode(THD *thd)
+{
+ return (Use_stat_tables_mode) (thd->variables.use_stat_tables);
+}
+
+int read_statistics_for_tables_if_needed(THD *thd, TABLE_LIST *tables);
+int collect_statistics_for_table(THD *thd, TABLE *table);
+int alloc_statistics_for_table_share(THD* thd, TABLE_SHARE *share,
+ bool is_safe);
+int alloc_statistics_for_table(THD *thd, TABLE *table);
+int update_statistics_for_table(THD *thd, TABLE *table);
+int delete_statistics_for_table(THD *thd, LEX_STRING *db, LEX_STRING *tab);
+int delete_statistics_for_column(THD *thd, TABLE *tab, Field *col);
+int delete_statistics_for_index(THD *thd, TABLE *tab, KEY *key_info,
+ bool ext_prefixes_only);
+int rename_table_in_stat_tables(THD *thd, LEX_STRING *db, LEX_STRING *tab,
+ LEX_STRING *new_db, LEX_STRING *new_tab);
+int rename_column_in_stat_tables(THD *thd, TABLE *tab, Field *col,
+ const char *new_name);
+void set_statistics_for_table(THD *thd, TABLE *table);
+
+class Column_statistics;
+class Index_statistics;
+
+
+/* Statistical data on a table */
+
+class Table_statistics
+{
+
+public:
+ my_bool cardinality_is_null; /* TRUE if the cardinality is unknown */
+ ha_rows cardinality; /* Number of rows in the table */
+ uchar *min_max_record_buffers; /* Record buffers for min/max values */
+ Column_statistics *column_stats; /* Array of statistical data for columns */
+ Index_statistics *index_stats; /* Array of statistical data for indexes */
+ ulong *idx_avg_frequency; /* Array of records per key for index prefixes */
+
+};
+
+
+/* Statistical data on a column */
+
+class Column_statistics
+{
+
+private:
+ static const uint Scale_factor_nulls_ratio= 100000;
+ static const uint Scale_factor_avg_length= 100000;
+ static const uint Scale_factor_avg_frequency= 100000;
+
+public:
+ /*
+ Bitmap indicating what statistical characteristics
+ are available for the column
+ */
+ uint32 column_stat_nulls;
+
+ /* Minimum value for the column */
+ Field *min_value;
+ /* Maximum value for the column */
+ Field *max_value;
+
+private:
+
+ /*
+ The ratio Z/N multiplied by the scale factor Scale_factor_nulls_ratio,
+ where
+ N is the total number of rows,
+ Z is the number of nulls in the column
+ */
+ ulong nulls_ratio;
+
+ /*
+ Average number of bytes occupied by the representation of a
+ value of the column in memory buffers such as join buffer
+ multiplied by the scale factor Scale_factor_avg_length.
+ CHAR values are stripped of trailing spaces.
+ Flexible values are stripped of their length prefixes.
+ */
+ ulong avg_length;
+
+ /*
+ The ratio N/D multiplied by the scale factor Scale_factor_avg_frequency,
+ where
+ N is the number of rows with not null value in the column,
+ D the number of distinct values among them
+ */
+ ulong avg_frequency;
+
+public:
+
+ void set_all_nulls()
+ {
+ column_stat_nulls=
+ ((1 << (COLUMN_STAT_AVG_FREQUENCY-COLUMN_STAT_COLUMN_NAME))-1) <<
+ (COLUMN_STAT_COLUMN_NAME+1);
+ }
+
+ void set_not_null(uint stat_field_no)
+ {
+ column_stat_nulls&= ~(1 << stat_field_no);
+ }
+
+ bool is_null(uint stat_field_no)
+ {
+ return test(column_stat_nulls & (1 << stat_field_no));
+ }
+
+ double get_nulls_ratio()
+ {
+ return (double) nulls_ratio / Scale_factor_nulls_ratio;
+ }
+
+ double get_avg_length()
+ {
+ return (double) avg_length / Scale_factor_avg_length;
+ }
+
+ double get_avg_frequency()
+ {
+ return (double) avg_frequency / Scale_factor_avg_frequency;
+ }
+
+ void set_nulls_ratio (double val)
+ {
+ nulls_ratio= (ulong) (val * Scale_factor_nulls_ratio);
+ }
+
+ void set_avg_length (double val)
+ {
+ avg_length= (ulong) (val * Scale_factor_avg_length);
+ }
+
+ void set_avg_frequency (double val)
+ {
+ avg_frequency= (ulong) (val * Scale_factor_avg_frequency);
+ }
+
+};
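The Scale_factor_* constants implement a simple fixed-point encoding; here is a standalone sketch of the round trip performed by the setters and getters above:

    #include <cstdio>

    int main()
    {
      const unsigned long scale= 100000;  /* Scale_factor_avg_frequency */
      double collected= 2.5;              /* value produced by ANALYZE  */
      unsigned long stored= (unsigned long) (collected * scale); /* 250000 */
      printf("%g\n", (double) stored / scale);  /* read back: 2.5 */
      return 0;
    }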
+
+
+/* Statistical data on index prefixes */
+
+class Index_statistics
+{
+
+private:
+ static const uint Scale_factor_avg_frequency= 100000;
+ /*
+ The k-th element of this array contains the ratio N/D
+ multiplied by the scale factor Scale_factor_avg_frequency,
+ where N is the number of index entries without nulls
+ in the first k components, and D is the number of distinct
+ k-component prefixes among them
+ */
+ ulong *avg_frequency;
+
+public:
+
+ void init_avg_frequency(ulong *ptr) { avg_frequency= ptr; }
+
+ bool avg_frequency_is_inited() { return avg_frequency != NULL; }
+
+ double get_avg_frequency(uint i)
+ {
+ return (double) avg_frequency[i] / Scale_factor_avg_frequency;
+ }
+
+ void set_avg_frequency(uint i, double val)
+ {
+ avg_frequency[i]= (ulong) (val * Scale_factor_avg_frequency);
+ }
+
+};
+
+#endif /* SQL_STATISTICS_H */
diff --git a/sql/sql_string.cc b/sql/sql_string.cc
index 75029a03790..9d11677666f 100644
--- a/sql/sql_string.cc
+++ b/sql/sql_string.cc
@@ -42,7 +42,9 @@ bool String::real_alloc(uint32 length)
if (Alloced_length < arg_length)
{
free();
- if (!(Ptr=(char*) my_malloc(arg_length,MYF(MY_WME))))
+ if (!(Ptr=(char*) my_malloc(arg_length,MYF(MY_WME |
+ (thread_specific ?
+ MY_THREAD_SPECIFIC : 0)))))
return TRUE;
Alloced_length=arg_length;
alloced=1;
@@ -90,10 +92,16 @@ bool String::realloc_raw(uint32 alloc_length)
return TRUE; /* Overflow */
if (alloced)
{
- if (!(new_ptr= (char*) my_realloc(Ptr,len,MYF(MY_WME))))
+ if (!(new_ptr= (char*) my_realloc(Ptr,len,
+ MYF(MY_WME |
+ (thread_specific ?
+ MY_THREAD_SPECIFIC : 0)))))
return TRUE; // Signal error
}
- else if ((new_ptr= (char*) my_malloc(len,MYF(MY_WME))))
+ else if ((new_ptr= (char*) my_malloc(len,
+ MYF(MY_WME |
+ (thread_specific ?
+ MY_THREAD_SPECIFIC : 0)))))
{
if (str_length > len - 1)
str_length= 0;
@@ -502,6 +510,24 @@ bool String::append(IO_CACHE* file, uint32 arg_length)
return FALSE;
}
+
+/**
+ Append a parenthesized number to String.
+ Used in various pieces of SHOW-related code.
+
+ @param nr Number
+ @param radix Radix, optional parameter, 10 by default.
+*/
+bool String::append_parenthesized(long nr, int radix)
+{
+ char buff[64], *end;
+ buff[0]= '(';
+ end= int10_to_str(nr, buff + 1, radix);
+ *end++ = ')';
+ return append(buff, (uint) (end - buff));
+}
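A brief usage note (a hypothetical caller inside the server, assuming the String object is otherwise initialized; not a standalone program):

    String s;
    s.append_parenthesized(42);  /* s now ends with "(42)" */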
+
+
bool String::append_with_prefill(const char *s,uint32 arg_length,
uint32 full_length, char fill_char)
{
@@ -750,79 +776,6 @@ String *copy_if_not_alloced(String *to,String *from,uint32 from_length)
Help functions
****************************************************************************/
-/*
- copy a string from one character set to another
-
- SYNOPSIS
- copy_and_convert()
- to Store result here
- to_cs Character set of result string
- from Copy from here
- from_length Length of from string
- from_cs From character set
-
- NOTES
- 'to' must be big enough as form_length * to_cs->mbmaxlen
-
- RETURN
- length of bytes copied to 'to'
-*/
-
-
-static uint32
-copy_and_convert_extended(char *to, uint32 to_length, CHARSET_INFO *to_cs,
- const char *from, uint32 from_length,
- CHARSET_INFO *from_cs,
- uint *errors)
-{
- int cnvres;
- my_wc_t wc;
- const uchar *from_end= (const uchar*) from+from_length;
- char *to_start= to;
- uchar *to_end= (uchar*) to+to_length;
- my_charset_conv_mb_wc mb_wc= from_cs->cset->mb_wc;
- my_charset_conv_wc_mb wc_mb= to_cs->cset->wc_mb;
- uint error_count= 0;
-
- while (1)
- {
- if ((cnvres= (*mb_wc)(from_cs, &wc, (uchar*) from,
- from_end)) > 0)
- from+= cnvres;
- else if (cnvres == MY_CS_ILSEQ)
- {
- error_count++;
- from++;
- wc= '?';
- }
- else if (cnvres > MY_CS_TOOSMALL)
- {
- /*
- A correct multibyte sequence detected
- But it doesn't have Unicode mapping.
- */
- error_count++;
- from+= (-cnvres);
- wc= '?';
- }
- else
- break; // Not enough characters
-
-outp:
- if ((cnvres= (*wc_mb)(to_cs, wc, (uchar*) to, to_end)) > 0)
- to+= cnvres;
- else if (cnvres == MY_CS_ILUNI && wc != '?')
- {
- error_count++;
- wc= '?';
- goto outp;
- }
- else
- break;
- }
- *errors= error_count;
- return (uint32) (to - to_start);
-}
/*
diff --git a/sql/sql_string.h b/sql/sql_string.h
index 8b09d449c2b..58cda343dac 100644
--- a/sql/sql_string.h
+++ b/sql/sql_string.h
@@ -56,23 +56,26 @@ class String
{
char *Ptr;
uint32 str_length,Alloced_length, extra_alloc;
- bool alloced;
+ bool alloced,thread_specific;
CHARSET_INFO *str_charset;
public:
String()
{
- Ptr=0; str_length=Alloced_length=extra_alloc=0; alloced=0;
+ Ptr=0; str_length=Alloced_length=extra_alloc=0;
+ alloced= thread_specific= 0;
str_charset= &my_charset_bin;
}
String(uint32 length_arg)
{
- alloced=0; Alloced_length= extra_alloc= 0; (void) real_alloc(length_arg);
+ alloced= thread_specific= 0;
+ Alloced_length= extra_alloc= 0; (void) real_alloc(length_arg);
str_charset= &my_charset_bin;
}
String(const char *str, CHARSET_INFO *cs)
{
Ptr=(char*) str; str_length= (uint32) strlen(str);
- Alloced_length= extra_alloc= 0; alloced=0;
+ Alloced_length= extra_alloc= 0;
+ alloced= thread_specific= 0;
str_charset=cs;
}
/*
@@ -82,18 +85,21 @@ public:
*/
String(const char *str,uint32 len, CHARSET_INFO *cs)
{
- Ptr=(char*) str; str_length=len; Alloced_length= extra_alloc=0; alloced=0;
+ Ptr=(char*) str; str_length=len; Alloced_length= extra_alloc=0;
+ alloced= thread_specific= 0;
str_charset=cs;
}
String(char *str,uint32 len, CHARSET_INFO *cs)
{
- Ptr=(char*) str; Alloced_length=str_length=len; extra_alloc= 0; alloced=0;
+ Ptr=(char*) str; Alloced_length=str_length=len; extra_alloc= 0;
+ alloced= thread_specific= 0;
str_charset=cs;
}
String(const String &str)
{
Ptr=str.Ptr ; str_length=str.str_length ;
- Alloced_length=str.Alloced_length; extra_alloc= 0; alloced=0;
+ Alloced_length=str.Alloced_length; extra_alloc= 0;
+ alloced= thread_specific= 0;
str_charset=str.str_charset;
}
static void *operator new(size_t size, MEM_ROOT *mem_root) throw ()
@@ -108,6 +114,12 @@ public:
{ /* never called */ }
~String() { free(); }
+ /* Mark the variable thread specific if it's not already allocated */
+ inline void set_thread_specific()
+ {
+ if (!alloced)
+ thread_specific= 1;
+ }
inline void set_charset(CHARSET_INFO *charset_arg)
{ str_charset= charset_arg; }
inline CHARSET_INFO *charset() const { return str_charset; }
@@ -332,6 +344,7 @@ public:
Ptr=s.Ptr ; str_length=s.str_length ; Alloced_length=s.Alloced_length;
extra_alloc= s.extra_alloc;
alloced= s.alloced;
+ thread_specific= s.thread_specific;
s.alloced= 0;
}
bool append(const String &s);
@@ -346,6 +359,7 @@ public:
bool append(IO_CACHE* file, uint32 arg_length);
bool append_with_prefill(const char *s, uint32 arg_length,
uint32 full_length, char fill_char);
+ bool append_parenthesized(long nr, int radix= 10);
int strstr(const String &search,uint32 offset=0); // Returns offset to substring or -1
int strrstr(const String &search,uint32 offset=0); // Returns offset to substring or -1
bool replace(uint32 offset,uint32 arg_length,const char *to,uint32 length);
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 330c28ebbd8..d0a4aedfa4f 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -43,6 +43,7 @@
#include "discover.h" // readfrm
#include "my_pthread.h" // pthread_mutex_t
#include "log_event.h" // Query_log_event
+#include "sql_statistics.h"
#include <hash.h>
#include <myisam.h>
#include <my_dir.h>
@@ -960,7 +961,7 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
ddl_log_entry->handler_name));
handler_name.str= (char*)ddl_log_entry->handler_name;
handler_name.length= strlen(ddl_log_entry->handler_name);
- init_sql_alloc(&mem_root, TABLE_ALLOC_BLOCK_SIZE, 0);
+ init_sql_alloc(&mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(MY_THREAD_SPECIFIC));
if (!strcmp(ddl_log_entry->handler_name, reg_ext))
frm_action= TRUE;
else
@@ -1520,7 +1521,7 @@ void execute_ddl_log_recovery()
global_ddl_log.recovery_phase= FALSE;
delete thd;
/* Remember that we don't have a THD */
- my_pthread_setspecific_ptr(THR_THD, 0);
+ set_current_thd(0);
DBUG_VOID_RETURN;
}
@@ -1886,6 +1887,17 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists,
}
}
+ if (!in_bootstrap)
+ {
+ for (table= tables; table; table= table->next_local)
+ {
+ LEX_STRING db_name= { table->db, table->db_length };
+ LEX_STRING table_name= { table->table_name, table->table_name_length };
+ if (table->open_type == OT_BASE_ONLY || !find_temporary_table(thd, table))
+ (void) delete_statistics_for_table(thd, &db_name, &table_name);
+ }
+ }
+
mysql_ha_rm_tables(thd, tables);
if (!drop_temporary)
@@ -1896,6 +1908,7 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists,
MYSQL_OPEN_SKIP_TEMPORARY))
DBUG_RETURN(true);
for (table= tables; table; table= table->next_local)
tdc_remove_table(thd, TDC_RT_REMOVE_ALL, table->db, table->table_name,
false);
}
@@ -2607,7 +2620,6 @@ void calculate_interval_lengths(CHARSET_INFO *cs, TYPELIB *interval,
prepare_create_field()
sql_field field to prepare for packing
blob_columns count for BLOBs
- timestamps count for timestamps
table_flags table flags
DESCRIPTION
@@ -2621,7 +2633,6 @@ void calculate_interval_lengths(CHARSET_INFO *cs, TYPELIB *interval,
int prepare_create_field(Create_field *sql_field,
uint *blob_columns,
- int *timestamps, int *timestamps_with_niladic,
longlong table_flags)
{
unsigned int dup_val_count;
@@ -2743,21 +2754,6 @@ int prepare_create_field(Create_field *sql_field,
(sql_field->decimals << FIELDFLAG_DEC_SHIFT));
break;
case MYSQL_TYPE_TIMESTAMP:
- /* We should replace old TIMESTAMP fields with their newer analogs */
- if (sql_field->unireg_check == Field::TIMESTAMP_OLD_FIELD)
- {
- if (!*timestamps)
- {
- sql_field->unireg_check= Field::TIMESTAMP_DNUN_FIELD;
- (*timestamps_with_niladic)++;
- }
- else
- sql_field->unireg_check= Field::NONE;
- }
- else if (sql_field->unireg_check != Field::NONE)
- (*timestamps_with_niladic)++;
-
- (*timestamps)++;
/* fall-through */
default:
sql_field->pack_flag=(FIELDFLAG_NUMBER |
@@ -2829,6 +2825,40 @@ bool check_duplicate_warning(THD *thd, char *msg, ulong length)
}
+/**
+ Modifies the first column definition whose SQL type is TIMESTAMP
+ by adding the DEFAULT CURRENT_TIMESTAMP and ON UPDATE CURRENT_TIMESTAMP
+ attributes.
+
+ @param column_definitions The list of column definitions, in the physical
+ order in which they appear in the table.
+ */
+void promote_first_timestamp_column(List<Create_field> *column_definitions)
+{
+ List_iterator<Create_field> it(*column_definitions);
+ Create_field *column_definition;
+
+ while ((column_definition= it++) != NULL)
+ {
+ if (column_definition->sql_type == MYSQL_TYPE_TIMESTAMP || // TIMESTAMP
+ column_definition->unireg_check == Field::TIMESTAMP_OLD_FIELD) // Legacy
+ {
+ if ((column_definition->flags & NOT_NULL_FLAG) != 0 && // NOT NULL,
+ column_definition->def == NULL && // no constant default,
+ column_definition->unireg_check == Field::NONE) // no function default
+ {
+ DBUG_PRINT("info", ("First TIMESTAMP column '%s' was promoted to "
+ "DEFAULT CURRENT_TIMESTAMP ON UPDATE "
+ "CURRENT_TIMESTAMP",
+ column_definition->field_name
+ ));
+ column_definition->unireg_check= Field::TIMESTAMP_DNUN_FIELD;
+ }
+ return;
+ }
+ }
+}
+
+
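A minimal SQL sketch of what this promotion means in practice (table and
column names are hypothetical): per the doc comment above, the first
TIMESTAMP column that is NOT NULL and has neither a constant nor a
function default is treated as if both automatic attributes were given.

    CREATE TABLE t1 (ts TIMESTAMP NOT NULL);
    -- is handled as if written:
    CREATE TABLE t1 (
      ts TIMESTAMP NOT NULL
         DEFAULT CURRENT_TIMESTAMP
         ON UPDATE CURRENT_TIMESTAMP
    );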
/*
Preparation for table creation
@@ -2869,7 +2899,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
ulong record_offset= 0;
KEY *key_info;
KEY_PART_INFO *key_part_info;
- int timestamps= 0, timestamps_with_niladic= 0;
int field_no,dup_no;
int select_field_pos,auto_increment=0;
List_iterator<Create_field> it(alter_info->create_list);
@@ -3153,7 +3182,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
DBUG_ASSERT(sql_field->charset != 0);
if (prepare_create_field(sql_field, &blob_columns,
- &timestamps, &timestamps_with_niladic,
file->ha_table_flags()))
DBUG_RETURN(TRUE);
if (sql_field->sql_type == MYSQL_TYPE_VARCHAR)
@@ -3184,12 +3212,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
record_offset+= sql_field->pack_length;
}
}
- if (timestamps_with_niladic > 1)
- {
- my_message(ER_TOO_MUCH_AUTO_TIMESTAMP_COLS,
- ER(ER_TOO_MUCH_AUTO_TIMESTAMP_COLS), MYF(0));
- DBUG_RETURN(TRUE);
- }
if (auto_increment > 1)
{
my_message(ER_WRONG_AUTO_KEY, ER(ER_WRONG_AUTO_KEY), MYF(0));
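With the timestamps_with_niladic counting and its check removed above,
more than one TIMESTAMP column per table may carry an automatic default.
Assuming that is the intent of this hunk, a definition like the following
(hypothetical names) should no longer fail with
ER_TOO_MUCH_AUTO_TIMESTAMP_COLS:

    CREATE TABLE t2 (
      created TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
      updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                        ON UPDATE CURRENT_TIMESTAMP
    );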
@@ -4560,6 +4582,7 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table,
/* Got lock. */
DEBUG_SYNC(thd, "locked_table_name");
+ promote_first_timestamp_column(&alter_info->create_list);
result= mysql_create_table_no_lock(thd, create_table->db,
create_table->table_name, create_info,
alter_info, FALSE, 0, &is_trans);
@@ -5151,6 +5174,21 @@ mysql_compare_tables(TABLE *table,
thd->calloc(sizeof(void*) * table->s->keys)) == NULL)
DBUG_RETURN(1);
+ tmp_new_field_it.init(tmp_alter_info.create_list);
+ for (i= 0, f_ptr= table->field, tmp_new_field= tmp_new_field_it++;
+ (field= *f_ptr);
+ i++, f_ptr++, tmp_new_field= tmp_new_field_it++)
+ {
+ if (field->is_equal(tmp_new_field) == IS_EQUAL_NO &&
+ table->s->tmp_table == NO_TMP_TABLE)
+ (void) delete_statistics_for_column(thd, table, field);
+ else if (my_strcasecmp(system_charset_info,
+ field->field_name,
+ tmp_new_field->field_name))
+ (void) rename_column_in_stat_tables(thd, table, field,
+ tmp_new_field->field_name);
+ }
+
/*
Use transformed info to evaluate possibility of in-place ALTER TABLE
but use the preserved field to persist modifications.
@@ -5211,11 +5249,36 @@ mysql_compare_tables(TABLE *table,
if (my_strcasecmp(system_charset_info,
field->field_name,
tmp_new_field->field_name))
- field->flags|= FIELD_IS_RENAMED;
+ {
+ field->flags|= FIELD_IS_RENAMED;
+ if (table->s->tmp_table == NO_TMP_TABLE)
+ rename_column_in_stat_tables(thd, table, field,
+ tmp_new_field->field_name);
+ }
/* Evaluate changes bitmap and send to check_if_incompatible_data() */
if (!(tmp= field->is_equal(tmp_new_field)))
{
+ if (table->s->tmp_table == NO_TMP_TABLE)
+ {
+ KEY *key_info= table->key_info;
+ for (uint i=0; i < table->s->keys; i++, key_info++)
+ {
+ if (field->part_of_key.is_set(i))
+ {
+ uint key_parts= table->actual_n_key_parts(key_info);
+ for (uint j= 0; j < key_parts; j++)
+ {
+ if (key_info->key_part[j].fieldnr-1 == field->field_index)
+ {
+ (void) delete_statistics_for_index(thd, table, key_info,
+ j >= key_info->key_parts);
+ break;
+ }
+ }
+ }
+ }
+ }
DBUG_PRINT("info", ("!field_is_equal('%s') -> ALTER_TABLE_DATA_CHANGED",
new_field->field_name));
DBUG_RETURN(0);
@@ -5314,6 +5377,21 @@ mysql_compare_tables(TABLE *table,
field= table->field[key_part->fieldnr];
field->flags|= FIELD_IN_ADD_INDEX;
}
+ if (table->s->tmp_table == NO_TMP_TABLE)
+ {
+ (void) delete_statistics_for_index(thd, table, table_key, FALSE);
+ if ((uint) (table_key - table->key_info) == table->s->primary_key)
+ {
+ KEY *tab_key_info= table->key_info;
+ for (uint j=0; j < table->s->keys; j++, tab_key_info++)
+ {
+ if (tab_key_info->key_parts != tab_key_info->ext_key_parts)
+ (void) delete_statistics_for_index(thd, table, tab_key_info,
+ TRUE);
+ }
+ }
+ }
+
DBUG_PRINT("info", ("index changed: '%s'", table_key->name));
}
/*end of for (; table_key < table_key_end;) */
@@ -5515,6 +5593,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
uint used_fields= create_info->used_fields;
KEY *key_info=table->key_info;
bool rc= TRUE;
+ bool modified_primary_key= FALSE;
Create_field *def;
Field **f_ptr,*field;
DBUG_ENTER("mysql_prepare_alter_table");
@@ -5571,6 +5650,8 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
}
if (drop)
{
+ if (table->s->tmp_table == NO_TMP_TABLE)
+ (void) delete_statistics_for_column(thd, table, field);
drop_it.remove();
/*
ALTER TABLE DROP COLUMN always changes table data even in cases
@@ -5709,7 +5790,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
Collect all keys which aren't in the drop list. Add only those
for which some fields exist.
*/
-
+
for (uint i=0 ; i < table->s->keys ; i++,key_info++)
{
char *key_name= key_info->name;
@@ -5723,12 +5804,27 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
}
if (drop)
{
+ if (table->s->tmp_table == NO_TMP_TABLE)
+ {
+ (void) delete_statistics_for_index(thd, table, key_info, FALSE);
+ if (i == table->s->primary_key)
+ {
+ KEY *tab_key_info= table->key_info;
+ for (uint j=0; j < table->s->keys; j++, tab_key_info++)
+ {
+ if (tab_key_info->key_parts != tab_key_info->ext_key_parts)
+ (void) delete_statistics_for_index(thd, table, tab_key_info,
+ TRUE);
+ }
+ }
+ }
drop_it.remove();
continue;
}
KEY_PART_INFO *key_part= key_info->key_part;
key_parts.empty();
+ bool delete_index_stat= FALSE;
for (uint j=0 ; j < key_info->key_parts ; j++,key_part++)
{
if (!key_part->field)
@@ -5751,7 +5847,12 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
break;
}
if (!cfield)
+ {
+ if (table->s->primary_key == i)
+ modified_primary_key= TRUE;
+ delete_index_stat= TRUE;
continue; // Field is removed
+ }
key_part_length= key_part->length;
if (cfield->field) // Not new field
{
@@ -5793,6 +5894,15 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
strlen(cfield->field_name),
key_part_length));
}
+ if (table->s->tmp_table == NO_TMP_TABLE)
+ {
+ if (delete_index_stat)
+ (void) delete_statistics_for_index(thd, table, key_info, FALSE);
+ else if (modified_primary_key &&
+ key_info->key_parts != key_info->ext_key_parts)
+ (void) delete_statistics_for_index(thd, table, key_info, TRUE);
+ }
+
if (key_parts.elements)
{
KEY_CREATE_INFO key_create_info;
@@ -5972,6 +6082,9 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
enum ha_extra_function extra_func= thd->locked_tables_mode
? HA_EXTRA_NOT_USED
: HA_EXTRA_FORCE_REOPEN;
+ LEX_STRING old_db_name= { table_list->db, table_list->db_length };
+ LEX_STRING old_table_name= { table_list->table_name,
+ table_list->table_name_length };
DBUG_ENTER("mysql_alter_table");
/*
@@ -6290,6 +6403,12 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
else
{
*fn_ext(new_name)=0;
+
+ LEX_STRING new_db_name= { new_db, strlen(new_db) };
+ LEX_STRING new_table_name= { new_alias, strlen(new_alias) };
+ (void) rename_table_in_stat_tables(thd, &old_db_name, &old_table_name,
+ &new_db_name, &new_table_name);
+
if (mysql_rename_table(old_db_type,db,table_name,new_db,new_alias, 0))
error= -1;
else if (Table_triggers_list::change_table_name(thd, db,
@@ -6376,6 +6495,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
need_copy_table= alter_info->change_level;
set_table_default_charset(thd, create_info, db);
+ promote_first_timestamp_column(&alter_info->create_list);
if (thd->variables.old_alter_table
|| (table->s->db_type() != create_info->db_type)
@@ -6753,8 +6873,6 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
*/
if (new_table && !(new_table->file->ha_table_flags() & HA_NO_COPY_ON_ALTER))
{
- /* We don't want update TIMESTAMP fields during ALTER TABLE. */
- new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
new_table->next_number_field=new_table->found_next_number_field;
DBUG_EXECUTE_IF("abort_copy_table", {
my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0));
@@ -7044,6 +7162,15 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
table is renamed and the SE is also changed, then an intermediate table
is created and the additional call will not take place.
*/
+
+ if (new_name != table_name || new_db != db)
+ {
+ LEX_STRING new_db_name= { new_db, strlen(new_db) };
+ LEX_STRING new_table_name= { new_name, strlen(new_name) };
+ (void) rename_table_in_stat_tables(thd, &old_db_name, &old_table_name,
+ &new_db_name, &new_table_name);
+ }
+
if (need_copy_table == ALTER_TABLE_METADATA_ONLY)
{
DBUG_ASSERT(new_db_type == old_db_type);
@@ -7325,11 +7452,13 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
List<Item> fields;
List<Item> all_fields;
ha_rows examined_rows;
+ ha_rows found_rows;
bool auto_increment_field_copied= 0;
ulonglong save_sql_mode= thd->variables.sql_mode;
ulonglong prev_insert_id, time_to_report_progress;
List_iterator<Create_field> it(create);
Create_field *def;
+ Field **dfield_ptr= to->default_field;
DBUG_ENTER("copy_data_between_tables");
/* Two or 3 stages; Sorting, copying data and update indexes */
@@ -7355,10 +7484,12 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
MODE_STRICT_ALL_TABLES));
from->file->info(HA_STATUS_VARIABLE);
- to->file->ha_start_bulk_insert(from->file->stats.records);
+ to->file->ha_start_bulk_insert(from->file->stats.records,
+ ignore ? 0 : HA_CREATE_UNIQUE_INDEX_BY_SORT);
errpos= 3;
copy_end=copy;
+ to->s->default_fields= 0;
for (Field **ptr=to->field ; *ptr ; ptr++)
{
def=it++;
@@ -7378,8 +7509,23 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
}
(copy_end++)->set(*ptr,def->field,0);
}
-
+ else
+ {
+ /*
+ Update the set of auto-update fields to contain only the new fields
+ added to the table. Only these fields should be updated automatically.
+ Old fields keep their current values, and therefore should not be
+ present in the set of auto-update fields.
+ */
+ if ((*ptr)->has_insert_default_function())
+ {
+ *(dfield_ptr++)= *ptr;
+ ++to->s->default_fields;
+ }
+ }
}
+ if (dfield_ptr)
+ *dfield_ptr= NULL;
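Per the comment above, only columns added by the ALTER end up in
to->default_field, so during the copy they are filled from their function
defaults while pre-existing columns keep their stored values. A
hypothetical statement exercising this path:

    ALTER TABLE t1
      ADD COLUMN mod_ts TIMESTAMP
          DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP;
    -- existing rows get mod_ts filled from the default during the copy;
    -- their other columns are copied unchanged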
if (order)
{
@@ -7396,7 +7542,8 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
else
{
from->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE),
- MYF(MY_FAE | MY_ZEROFILL));
+ MYF(MY_FAE | MY_ZEROFILL |
+ MY_THREAD_SPECIFIC));
bzero((char *) &tables, sizeof(tables));
tables.table= from;
tables.alias= tables.table_name= from->s->table_name.str;
@@ -7408,8 +7555,9 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
&tables, fields, all_fields, order) ||
!(sortorder= make_unireg_sortorder(order, &length, NULL)) ||
(from->sort.found_records= filesort(thd, from, sortorder, length,
- (SQL_SELECT *) 0, HA_POS_ERROR,
- 1, &examined_rows)) ==
+ NULL, HA_POS_ERROR,
+ true,
+ &examined_rows, &found_rows)) ==
HA_POS_ERROR)
goto err;
}
@@ -7469,6 +7617,11 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
prev_insert_id= to->file->next_insert_id;
if (to->vfield)
update_virtual_fields(thd, to, VCOL_UPDATE_FOR_WRITE);
+ if (to->default_field && to->update_default_fields())
+ {
+ error= 1;
+ break;
+ }
if (thd->is_error())
{
error= 1;
diff --git a/sql/sql_table.h b/sql/sql_table.h
index 00de6ed1b8d..9d5e768a5a3 100644
--- a/sql/sql_table.h
+++ b/sql/sql_table.h
@@ -187,7 +187,6 @@ void close_cached_table(THD *thd, TABLE *table);
void sp_prepare_create_field(THD *thd, Create_field *sql_field);
int prepare_create_field(Create_field *sql_field,
uint *blob_columns,
- int *timestamps, int *timestamps_with_niladic,
longlong table_flags);
CHARSET_INFO* get_sql_field_charset(Create_field *sql_field,
HA_CREATE_INFO *create_info);
@@ -208,6 +207,9 @@ void execute_ddl_log_recovery();
bool execute_ddl_log_entry(THD *thd, uint first_entry);
bool check_duplicate_warning(THD *thd, char *msg, ulong length);
+template<typename T> class List;
+void promote_first_timestamp_column(List<Create_field> *column_definitions);
+
/*
These prototypes where under INNODB_COMPATIBILITY_HOOKS.
*/
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index 25ab84fe4db..5b3286d77b0 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -484,7 +484,9 @@ static void display_table_locks(void)
void *saved_base;
DYNAMIC_ARRAY saved_table_locks;
- (void) my_init_dynamic_array(&saved_table_locks,sizeof(TABLE_LOCK_INFO), cached_open_tables() + 20,50);
+ (void) my_init_dynamic_array(&saved_table_locks,sizeof(TABLE_LOCK_INFO),
+ cached_open_tables() + 20, 50,
+ MYF(MY_THREAD_SPECIFIC));
mysql_mutex_lock(&THR_LOCK_lock);
for (list= thr_lock_thread_list; list; list= list_rest(list))
{
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index aff00f9fcf4..d7d902bc6b0 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -1779,7 +1779,7 @@ bool Table_triggers_list::drop_all_triggers(THD *thd, char *db, char *name)
DBUG_ENTER("drop_all_triggers");
bzero(&table, sizeof(table));
- init_sql_alloc(&table.mem_root, 8192, 0);
+ init_sql_alloc(&table.mem_root, 8192, 0, MYF(0));
if (Table_triggers_list::check_n_load(thd, db, name, &table, 1))
{
@@ -1999,7 +1999,7 @@ bool Table_triggers_list::change_table_name(THD *thd, const char *db,
DBUG_ENTER("change_table_name");
bzero(&table, sizeof(table));
- init_sql_alloc(&table.mem_root, 8192, 0);
+ init_sql_alloc(&table.mem_root, 8192, 0, MYF(0));
/*
This method interfaces the mysql server code protected by
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index 169e0d9e418..c792dca873c 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -151,7 +151,7 @@ void udf_init()
mysql_rwlock_init(key_rwlock_THR_LOCK_udf, &THR_LOCK_udf);
- init_sql_alloc(&mem, UDF_ALLOC_BLOCK_SIZE, 0);
+ init_sql_alloc(&mem, UDF_ALLOC_BLOCK_SIZE, 0, MYF(0));
THD *new_thd = new THD;
if (!new_thd ||
my_hash_init(&udf_hash,system_charset_info,32,0,0,get_hash_key, NULL, 0))
@@ -258,7 +258,7 @@ end:
close_mysql_tables(new_thd);
delete new_thd;
/* Remember that we don't have a THD */
- my_pthread_setspecific_ptr(THR_THD, 0);
+ set_current_thd(0);
DBUG_VOID_RETURN;
}
@@ -428,7 +428,6 @@ int mysql_create_function(THD *thd,udf_func *udf)
TABLE *table;
TABLE_LIST tables;
udf_func *u_d;
- bool save_binlog_row_based;
DBUG_ENTER("mysql_create_function");
if (!initialized)
@@ -459,13 +458,6 @@ int mysql_create_function(THD *thd,udf_func *udf)
DBUG_RETURN(1);
}
- /*
- Turn off row binlogging of this statement and use statement-based
- so that all supporting tables are updated for CREATE FUNCTION command.
- */
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
-
mysql_rwlock_wrlock(&THR_LOCK_udf);
if ((my_hash_search(&udf_hash,(uchar*) udf->name.str, udf->name.length)))
{
@@ -533,27 +525,14 @@ int mysql_create_function(THD *thd,udf_func *udf)
/* Binlog the create function. */
if (write_bin_log(thd, TRUE, thd->query(), thd->query_length()))
- {
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(1);
- }
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
+
DBUG_RETURN(0);
err:
if (new_dl)
dlclose(dl);
mysql_rwlock_unlock(&THR_LOCK_udf);
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(1);
}
@@ -565,7 +544,6 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name)
udf_func *udf;
char *exact_name_str;
uint exact_name_len;
- bool save_binlog_row_based;
DBUG_ENTER("mysql_drop_function");
if (!initialized)
@@ -577,13 +555,6 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name)
DBUG_RETURN(1);
}
- /*
- Turn off row binlogging of this statement and use statement-based
- so that all supporting tables are updated for DROP FUNCTION command.
- */
- if ((save_binlog_row_based= thd->is_current_stmt_binlog_format_row()))
- thd->clear_current_stmt_binlog_format_row();
-
mysql_rwlock_wrlock(&THR_LOCK_udf);
if (!(udf=(udf_func*) my_hash_search(&udf_hash,(uchar*) udf_name->str,
(uint) udf_name->length)))
@@ -623,24 +594,12 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name)
while binlogging, to avoid binlog inconsistency.
*/
if (write_bin_log(thd, TRUE, thd->query(), thd->query_length()))
- {
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(1);
- }
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
+
DBUG_RETURN(0);
+
err:
mysql_rwlock_unlock(&THR_LOCK_udf);
- /* Restore the state of binlog format */
- DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
- if (save_binlog_row_based)
- thd->set_current_stmt_binlog_format_row();
DBUG_RETURN(1);
}
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 77d4df1b398..eb4454ecab3 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -63,7 +63,7 @@ int select_union::send_data(List<Item> &values)
return 0;
if (table->no_rows_with_nulls)
table->null_catch_flags= CHECK_ROW_FOR_NULLS_TO_REJECT;
- fill_record(thd, table->field, values, TRUE, FALSE);
+ fill_record(thd, table, table->field, values, TRUE, FALSE);
if (thd->is_error())
return 1;
if (table->no_rows_with_nulls)
@@ -722,6 +722,8 @@ bool st_select_lex_unit::exec()
}
}
+ DBUG_EXECUTE_IF("show_explain_probe_union_read",
+ dbug_serve_apcs(thd, 1););
/* Send result to 'result' */
saved_error= TRUE;
{
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index f59dfb3bc93..28b9fe7eacd 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -32,6 +32,7 @@
#include "sql_view.h" // check_key_in_view
#include "sp_head.h"
#include "sql_trigger.h"
+#include "sql_statistics.h"
#include "probes_mysql.h"
#include "debug_sync.h"
#include "key.h" // is_key_used
@@ -342,19 +343,8 @@ int mysql_update(THD *thd,
my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "UPDATE");
DBUG_RETURN(1);
}
- if (table->timestamp_field)
- {
- // Don't set timestamp column if this is modified
- if (bitmap_is_set(table->write_set,
- table->timestamp_field->field_index))
- table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
- else
- {
- if (((uint) table->timestamp_field_type) & TIMESTAMP_AUTO_SET_ON_UPDATE)
- bitmap_set_bit(table->write_set,
- table->timestamp_field->field_index);
- }
- }
+ if (table->default_field)
+ table->mark_default_fields_for_write();
#ifndef NO_EMBEDDED_ACCESS_CHECKS
/* Check values */
@@ -389,7 +379,7 @@ int mysql_update(THD *thd,
to compare records and detect data change.
*/
if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) &&
- (((uint) table->timestamp_field_type) & TIMESTAMP_AUTO_SET_ON_UPDATE))
+ table->default_field && table->has_default_function(true))
bitmap_union(table->read_set, table->write_set);
// Don't count on usage of 'only index' when calculating which key to use
table->covering_keys.clear_all();
@@ -404,6 +394,7 @@ int mysql_update(THD *thd,
#endif
/* Update the table->file->stats.records number */
table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
+ set_statistics_for_table(thd, table);
select= make_select(table, 0, 0, conds, 0, &error);
if (error || !limit || thd->is_error() ||
@@ -501,13 +492,16 @@ int mysql_update(THD *thd,
uint length= 0;
SORT_FIELD *sortorder;
ha_rows examined_rows;
+ ha_rows found_rows;
table->sort.io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
- MYF(MY_FAE | MY_ZEROFILL));
+ MYF(MY_FAE | MY_ZEROFILL |
+ MY_THREAD_SPECIFIC));
if (!(sortorder=make_unireg_sortorder(order, &length, NULL)) ||
(table->sort.found_records= filesort(thd, table, sortorder, length,
- select, limit, 1,
- &examined_rows))
+ select, limit,
+ true,
+ &examined_rows, &found_rows))
== HA_POS_ERROR)
{
goto err;
@@ -694,8 +688,7 @@ int mysql_update(THD *thd,
continue; /* repeat the read of the same row if it still exists */
store_record(table,record[1]);
- if (fill_record_n_invoke_before_triggers(thd, fields, values, 0,
- table->triggers,
+ if (fill_record_n_invoke_before_triggers(thd, table, fields, values, 0,
TRG_EVENT_UPDATE))
break; /* purecov: inspected */
@@ -703,6 +696,11 @@ int mysql_update(THD *thd,
if (!can_compare_record || compare_record(table))
{
+ if (table->default_field && table->update_default_fields())
+ {
+ error= 1;
+ break;
+ }
if ((res= table_list->view_check_option(thd, ignore)) !=
VIEW_CHECK_OK)
{
@@ -1249,11 +1247,6 @@ int mysql_multi_update_prepare(THD *thd)
while ((tl= ti++))
{
TABLE *table= tl->table;
- /* Only set timestamp column if this is not modified */
- if (table->timestamp_field &&
- bitmap_is_set(table->write_set,
- table->timestamp_field->field_index))
- table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
/* if table will be updated then check that it is unique */
if (table->map & tables_for_update)
@@ -1503,8 +1496,7 @@ int multi_update::prepare(List<Item> &not_used_values,
to compare records and detect data change.
*/
if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) &&
- (((uint) table->timestamp_field_type) &
- TIMESTAMP_AUTO_SET_ON_UPDATE))
+ table->default_field && table->has_default_function(true))
bitmap_union(table->read_set, table->write_set);
}
}
@@ -1883,10 +1875,10 @@ int multi_update::send_data(List<Item> &not_used_values)
table->status|= STATUS_UPDATED;
store_record(table,record[1]);
- if (fill_record_n_invoke_before_triggers(thd, *fields_for_table[offset],
+ if (fill_record_n_invoke_before_triggers(thd, table, *fields_for_table[offset],
*values_for_table[offset], 0,
- table->triggers,
- TRG_EVENT_UPDATE))
+ TRG_EVENT_UPDATE) ||
+ (table->default_field && table->update_default_fields()))
DBUG_RETURN(1);
/*
@@ -1987,7 +1979,7 @@ int multi_update::send_data(List<Item> &not_used_values)
} while ((tbl= tbl_it++));
/* Store regular updated fields in the row. */
- fill_record(thd,
+ fill_record(thd, tmp_table,
tmp_table->field + 1 + unupdated_check_opt_tables.elements,
*values_for_table[offset], TRUE, FALSE);
@@ -2177,7 +2169,10 @@ int multi_update::do_updates()
for (copy_field_ptr=copy_field;
copy_field_ptr != copy_field_end;
copy_field_ptr++)
+ {
(*copy_field_ptr->do_copy)(copy_field_ptr);
+ copy_field_ptr->to_field->set_has_explicit_value();
+ }
if (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
@@ -2187,6 +2182,8 @@ int multi_update::do_updates()
if (!can_compare_record || compare_record(table))
{
int error;
+ if (table->default_field && (error= table->update_default_fields()))
+ goto err2;
if ((error= cur_table->view_check_option(thd, ignore)) !=
VIEW_CHECK_OK)
{
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 253ec61460e..9664aad1e19 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -65,6 +65,7 @@
#include <myisammrg.h>
#include "keycaches.h"
#include "set_var.h"
+#include "rpl_mi.h"
/* this is to get the bison compilation windows warnings out */
#ifdef _MSC_VER
@@ -882,11 +883,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token COLLATION_SYM /* SQL-2003-N */
%token COLUMNS
%token COLUMN_ADD_SYM
+%token COLUMN_CHECK_SYM
%token COLUMN_CREATE_SYM
%token COLUMN_DELETE_SYM
-%token COLUMN_EXISTS_SYM
%token COLUMN_GET_SYM
-%token COLUMN_LIST_SYM
%token COLUMN_SYM /* SQL-2003-R */
%token COLUMN_NAME_SYM /* SQL-2003-N */
%token COMMENT_SYM
@@ -1291,6 +1291,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token SIGNED_SYM
%token SIMPLE_SYM /* SQL-2003-N */
%token SLAVE
+%token SLAVES
%token SLOW
%token SMALLINT /* SQL-2003-R */
%token SNAPSHOT_SYM
@@ -1481,6 +1482,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
ev_alter_on_schedule_completion opt_ev_rename_to opt_ev_sql_stmt
optional_flush_tables_arguments opt_dyncol_type dyncol_type
opt_time_precision kill_type kill_option int_num
+ opt_default_time_precision
%type <m_yes_no_unk>
opt_chain opt_release
@@ -1594,7 +1596,12 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
show describe load alter optimize keycache preload flush
reset purge begin commit rollback savepoint release
slave master_def master_defs master_file_def slave_until_opts
- repair analyze check start checksum
+ repair analyze
+ analyze_table_list analyze_table_elem_spec
+ opt_persistent_stat_clause persistent_stat_spec
+ persistent_column_stat_spec persistent_index_stat_spec
+ table_column_list table_index_list table_index_name
+ check start checksum
field_list field_list_item field_spec kill column_def key_def
keycache_list keycache_list_or_parts assign_to_keycache
assign_to_keycache_parts
@@ -1904,7 +1911,7 @@ help:
/* change master */
change:
- CHANGE MASTER_SYM TO_SYM
+ CHANGE MASTER_SYM optional_connection_name TO_SYM
{
Lex->sql_command = SQLCOM_CHANGE_MASTER;
}
@@ -2053,6 +2060,29 @@ master_file_def:
}
;
+optional_connection_name:
+ /* empty */
+ {
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
+ lex->mi.connection_name= thd->variables.default_master_connection;
+ }
+ | connection_name;
+ ;
+
+connection_name:
+ TEXT_STRING_sys
+ {
+ Lex->mi.connection_name= $1;
+#ifdef HAVE_REPLICATION
+ if (check_master_connection_name(&$1))
+ {
+ my_error(ER_WRONG_ARGUMENTS, MYF(0), "MASTER_CONNECTION_NAME");
+ MYSQL_YYABORT;
+ }
+#endif
+ }
+
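These rules extend CHANGE MASTER with an optional quoted connection name
for multi-source replication; when the name is omitted,
@@default_master_connection is used. A sketch with hypothetical host and
credentials:

    CHANGE MASTER 'master1' TO
      MASTER_HOST='10.0.0.1',
      MASTER_USER='repl',
      MASTER_PASSWORD='secret';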
/* create a table */
create:
@@ -5873,9 +5903,9 @@ attribute:
NULL_SYM { Lex->type&= ~ NOT_NULL_FLAG; }
| not NULL_SYM { Lex->type|= NOT_NULL_FLAG; }
| DEFAULT now_or_signed_literal { Lex->default_value=$2; }
- | ON UPDATE_SYM NOW_SYM optional_braces
+ | ON UPDATE_SYM NOW_SYM opt_default_time_precision
{
- Item *item= new (YYTHD->mem_root) Item_func_now_local(6);
+ Item *item= new (YYTHD->mem_root) Item_func_now_local($4);
if (item == NULL)
MYSQL_YYABORT;
Lex->on_update_value= item;
@@ -5967,9 +5997,9 @@ type_with_opt_collate:
now_or_signed_literal:
- NOW_SYM optional_braces
+ NOW_SYM opt_default_time_precision
{
- $$= new (YYTHD->mem_root) Item_func_now_local(6);
+ $$= new (YYTHD->mem_root) Item_func_now_local($2);
if ($$ == NULL)
MYSQL_YYABORT;
}
@@ -7074,7 +7104,7 @@ opt_to:
*/
slave:
- START_SYM SLAVE slave_thread_opts
+ START_SYM SLAVE optional_connection_name slave_thread_opts
{
LEX *lex=Lex;
lex->sql_command = SQLCOM_SLAVE_START;
@@ -7083,14 +7113,28 @@ slave:
}
slave_until
{}
- | STOP_SYM SLAVE slave_thread_opts
+ | START_SYM ALL SLAVES slave_thread_opts
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_SLAVE_ALL_START;
+ lex->type = 0;
+ }
+ {}
+ | STOP_SYM SLAVE optional_connection_name slave_thread_opts
{
LEX *lex=Lex;
lex->sql_command = SQLCOM_SLAVE_STOP;
lex->type = 0;
/* If you change this code don't forget to update SLAVE STOP too */
}
- | SLAVE START_SYM slave_thread_opts
+ | STOP_SYM ALL SLAVES slave_thread_opts
+ {
+ LEX *lex=Lex;
+ lex->sql_command = SQLCOM_SLAVE_ALL_STOP;
+ lex->type = 0;
+ /* If you change this code don't forget to update SLAVE STOP too */
+ }
+ | SLAVE optional_connection_name START_SYM slave_thread_opts
{
LEX *lex=Lex;
lex->sql_command = SQLCOM_SLAVE_START;
@@ -7098,7 +7142,7 @@ slave:
}
slave_until
{}
- | SLAVE STOP_SYM slave_thread_opts
+ | SLAVE optional_connection_name STOP_SYM slave_thread_opts
{
LEX *lex=Lex;
lex->sql_command = SQLCOM_SLAVE_STOP;
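The new alternatives above accept a per-connection name on START/STOP
SLAVE and add ALL SLAVES variants (the connection name 'master1' is
hypothetical):

    START SLAVE 'master1';
    STOP SLAVE 'master1';
    START ALL SLAVES;
    STOP ALL SLAVES;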
@@ -7229,7 +7273,7 @@ analyze:
/* Will be overriden during execution. */
YYPS->m_lock_type= TL_UNLOCK;
}
- table_list
+ analyze_table_list
{
THD *thd= YYTHD;
LEX* lex= thd->lex;
@@ -7240,6 +7284,96 @@ analyze:
}
;
+analyze_table_list:
+ analyze_table_elem_spec
+ | analyze_table_list ',' analyze_table_elem_spec
+ ;
+
+analyze_table_elem_spec:
+ table_name opt_persistent_stat_clause
+ ;
+
+opt_persistent_stat_clause:
+ /* empty */
+ {}
+ | PERSISTENT_SYM FOR_SYM persistent_stat_spec
+ {
+ THD *thd= YYTHD;
+ thd->lex->with_persistent_for_clause= TRUE;
+ }
+ ;
+
+persistent_stat_spec:
+ ALL
+ {}
+ | COLUMNS persistent_column_stat_spec INDEXES persistent_index_stat_spec
+ {}
+
+persistent_column_stat_spec:
+ ALL {}
+ | '('
+ {
+ THD *thd= YYTHD;
+ LEX* lex= thd->lex;
+ lex->column_list= new List<LEX_STRING>;
+ if (lex->column_list == NULL)
+ MYSQL_YYABORT;
+ }
+ table_column_list
+ ')'
+ ;
+
+persistent_index_stat_spec:
+ ALL {}
+ | '('
+ {
+ THD *thd= YYTHD;
+ LEX* lex= thd->lex;
+ lex->index_list= new List<LEX_STRING>;
+ if (lex->index_list == NULL)
+ MYSQL_YYABORT;
+ }
+ table_index_list
+ ')'
+ ;
+
+table_column_list:
+ /* empty */
+ {}
+ | ident
+ {
+ Lex->column_list->push_back((LEX_STRING*)
+ sql_memdup(&$1, sizeof(LEX_STRING)));
+ }
+ | table_column_list ',' ident
+ {
+ Lex->column_list->push_back((LEX_STRING*)
+ sql_memdup(&$3, sizeof(LEX_STRING)));
+ }
+ ;
+
+table_index_list:
+ /* empty */
+ {}
+ | table_index_name
+ | table_index_list ',' table_index_name
+ ;
+
+table_index_name:
+ ident
+ {
+ Lex->index_list->push_back(
+ (LEX_STRING*) sql_memdup(&$1, sizeof(LEX_STRING)));
+ }
+ |
+ PRIMARY_SYM
+ {
+ LEX_STRING str= {(char*) "PRIMARY", 7};
+ Lex->index_list->push_back(
+ (LEX_STRING*) sql_memdup(&str, sizeof(LEX_STRING)));
+ }
+ ;
+
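The grammar above admits statements of the following shape; table,
column, and index names are hypothetical:

    ANALYZE TABLE t1 PERSISTENT FOR ALL;
    ANALYZE TABLE t1 PERSISTENT FOR
      COLUMNS (a, b) INDEXES (PRIMARY, idx_a);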
binlog_base64_event:
BINLOG_SYM TEXT_STRING_sys
{
@@ -7743,6 +7877,12 @@ select_alias:
| TEXT_STRING_sys { $$=$1; }
;
+opt_default_time_precision:
+ /* empty */ { $$= NOT_FIXED_DEC; }
+ | '(' ')' { $$= NOT_FIXED_DEC; }
+ | '(' real_ulong_num ')' { $$= $2; };
+ ;
+
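With opt_default_time_precision, a fractional-seconds precision can be
attached to the DEFAULT and ON UPDATE clauses; a sketch (hypothetical
table, and assuming the usual NOW/CURRENT_TIMESTAMP synonymy):

    CREATE TABLE t3 (
      ts TIMESTAMP(6) NOT NULL
         DEFAULT NOW(6) ON UPDATE NOW(6)
    );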
opt_time_precision:
/* empty */ { $$= 0; }
| '(' ')' { $$= 0; }
@@ -8262,7 +8402,7 @@ dyncall_create_element:
alloc_root(YYTHD->mem_root, sizeof(DYNCALL_CREATE_DEF));
if ($$ == NULL)
MYSQL_YYABORT;
- $$->num= $1;
+ $$->key= $1;
$$->value= $3;
$$->type= (DYNAMIC_COLUMN_TYPE)$4;
$$->cs= lex->charset;
@@ -8816,16 +8956,9 @@ function_call_nonkeyword:
MYSQL_YYABORT;
}
|
- COLUMN_EXISTS_SYM '(' expr ',' expr ')'
- {
- $$= new (YYTHD->mem_root) Item_func_dyncol_exists($3, $5);
- if ($$ == NULL)
- MYSQL_YYABORT;
- }
- |
- COLUMN_LIST_SYM '(' expr ')'
+ COLUMN_CHECK_SYM '(' expr ')'
{
- $$= new (YYTHD->mem_root) Item_func_dyncol_list($3);
+ $$= new (YYTHD->mem_root) Item_func_dyncol_check($3);
if ($$ == NULL)
MYSQL_YYABORT;
}
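COLUMN_CHECK replaces COLUMN_EXISTS/COLUMN_LIST in this rule and
validates a dynamic-column blob; assuming Item_func_dyncol_check yields a
boolean, usage looks like:

    SELECT COLUMN_CHECK(COLUMN_CREATE('color', 'blue'));
    -- expected to return 1 for a well-formed value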
@@ -11429,7 +11562,7 @@ show_param:
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_BINLOG_EVENTS;
} opt_limit_clause_init
- | RELAYLOG_SYM EVENTS_SYM binlog_in binlog_from
+ | RELAYLOG_SYM optional_connection_name EVENTS_SYM binlog_in binlog_from
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_RELAYLOG_EVENTS;
@@ -11566,9 +11699,23 @@ show_param:
{
Lex->sql_command = SQLCOM_SHOW_MASTER_STAT;
}
+ | ALL SLAVES STATUS_SYM
+ {
+ Lex->sql_command = SQLCOM_SHOW_SLAVE_STAT;
+ Lex->verbose= 1;
+ }
| SLAVE STATUS_SYM
{
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
+ lex->mi.connection_name= thd->variables.default_master_connection;
+ lex->sql_command = SQLCOM_SHOW_SLAVE_STAT;
+ lex->verbose= 0;
+ }
+ | SLAVE connection_name STATUS_SYM
+ {
Lex->sql_command = SQLCOM_SHOW_SLAVE_STAT;
+ Lex->verbose= 0;
}
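The show_param additions give per-connection and aggregate status forms
('master1' is a hypothetical connection name):

    SHOW ALL SLAVES STATUS;
    SHOW SLAVE 'master1' STATUS;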
| CLIENT_STATS_SYM
{
@@ -11647,6 +11794,14 @@ show_param:
Lex->spname= $3;
Lex->sql_command = SQLCOM_SHOW_CREATE_EVENT;
}
+ | describe_command FOR_SYM expr
+ {
+ THD *thd= YYTHD;
+ Lex->sql_command= SQLCOM_SHOW_EXPLAIN;
+ if (prepare_schema_table(thd, Lex, 0, SCH_EXPLAIN))
+ MYSQL_YYABORT;
+ add_value_to_list(thd, $3);
+ }
;
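The describe_command FOR expr alternative maps to SQLCOM_SHOW_EXPLAIN;
presumably the expression is a connection id as listed by SHOW
PROCESSLIST:

    SHOW EXPLAIN FOR 4;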
show_engine_param:
@@ -11820,8 +11975,17 @@ flush_option:
{ Lex->type|= REFRESH_SLOW_LOG; }
| BINARY LOGS_SYM
{ Lex->type|= REFRESH_BINARY_LOG; }
- | RELAY LOGS_SYM
- { Lex->type|= REFRESH_RELAY_LOG; }
+ | RELAY LOGS_SYM optional_connection_name
+ {
+ LEX *lex= Lex;
+ if (lex->type & REFRESH_RELAY_LOG)
+ {
+ my_error(ER_WRONG_USAGE, MYF(0), "FLUSH", "RELAY LOGS");
+ MYSQL_YYABORT;
+ }
+ lex->type|= REFRESH_RELAY_LOG;
+ lex->relay_log_connection_name= lex->mi.connection_name;
+ }
| QUERY_SYM CACHE_SYM
{ Lex->type|= REFRESH_QUERY_CACHE_FREE; }
| HOSTS_SYM
@@ -11829,13 +11993,23 @@ flush_option:
| PRIVILEGES
{ Lex->type|= REFRESH_GRANT; }
| LOGS_SYM
- { Lex->type|= REFRESH_LOG; }
+ {
+ Lex->type|= REFRESH_LOG;
+ Lex->relay_log_connection_name.str= (char*) "";
+ Lex->relay_log_connection_name.length= 0;
+ }
| STATUS_SYM
{ Lex->type|= REFRESH_STATUS; }
- | SLAVE
+ | SLAVE optional_connection_name
{
- Lex->type|= REFRESH_SLAVE;
- Lex->reset_slave_info.all= false;
+ LEX *lex= Lex;
+ if (lex->type & REFRESH_SLAVE)
+ {
+ my_error(ER_WRONG_USAGE, MYF(0), "FLUSH","SLAVE");
+ MYSQL_YYABORT;
+ }
+ lex->type|= REFRESH_SLAVE;
+ lex->reset_slave_info.all= false;
}
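Both FLUSH RELAY LOGS and FLUSH SLAVE now take an optional connection
name, and repeating either option in one statement raises ER_WRONG_USAGE
('master1' hypothetical):

    FLUSH RELAY LOGS 'master1';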
| CLIENT_STATS_SYM
{ Lex->type|= REFRESH_CLIENT_STATS; }
@@ -11879,6 +12053,7 @@ reset_options:
reset_option:
SLAVE { Lex->type|= REFRESH_SLAVE; }
+ optional_connection_name
slave_reset_options { }
| MASTER_SYM { Lex->type|= REFRESH_MASTER; }
| QUERY_SYM CACHE_SYM { Lex->type|= REFRESH_QUERY_CACHE;}
@@ -12939,11 +13114,10 @@ keyword:
| CHECKPOINT_SYM {}
| CLOSE_SYM {}
| COLUMN_ADD_SYM {}
+ | COLUMN_CHECK_SYM {}
| COLUMN_CREATE_SYM {}
| COLUMN_DELETE_SYM {}
- | COLUMN_EXISTS_SYM {}
| COLUMN_GET_SYM {}
- | COLUMN_LIST_SYM {}
| COMMENT_SYM {}
| COMMIT_SYM {}
| CONTAINS_SYM {}
@@ -12977,6 +13151,7 @@ keyword:
| SIGNED_SYM {}
| SOCKET_SYM {}
| SLAVE {}
+ | SLAVES {}
| SONAME_SYM {}
| START_SYM {}
| STOP_SYM {}
diff --git a/sql/strfunc.cc b/sql/strfunc.cc
index 0c0742b3805..9603ca30cfa 100644
--- a/sql/strfunc.cc
+++ b/sql/strfunc.cc
@@ -15,6 +15,7 @@
/* Some useful string utility functions used by the MySQL server */
+#include <my_global.h>
#include "sql_priv.h"
#include "unireg.h"
#include "strfunc.h"
diff --git a/sql/structs.h b/sql/structs.h
index ae71819ae09..a3a54c524e6 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -29,6 +29,7 @@
struct TABLE;
class Field;
+class Index_statistics;
class THD;
@@ -96,6 +97,11 @@ typedef struct st_key {
uint block_size;
uint name_length;
enum ha_key_alg algorithm;
+ /*
+ The flag is on if statistical data for the index prefixes
+ has to be taken from the system statistical tables.
+ */
+ bool is_statistics_from_stat_tables;
/*
Note that parser is used when the table is opened for use, and
parser_name is used when the table is being created.
@@ -115,6 +121,18 @@ typedef struct st_key {
For temporary heap tables this member is NULL.
*/
ulong *rec_per_key;
+
+ /*
+ This structure is used for statistical data on the index
+ that has been read from the statistical table index_stat
+ */
+ Index_statistics *read_stats;
+ /*
+ This structure is used for statistical data on the index that
+ is collected by the function collect_statistics_for_table
+ */
+ Index_statistics *collected_stats;
+
union {
int bdb_return_if_eq;
} handler;
@@ -123,6 +141,9 @@ typedef struct st_key {
/** reference to the list of options or NULL */
engine_option_value *option_list;
ha_index_option_struct *option_struct; /* structure with parsed options */
+
+ double actual_rec_per_key(uint i);
+
} KEY;
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index c1c6e142706..d0b39f2b9b0 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -690,7 +690,7 @@ static bool event_scheduler_update(sys_var *self, THD *thd, enum_var_type type)
: Events::stop();
mysql_mutex_lock(&LOCK_global_system_variables);
if (ret)
- my_error(ER_EVENT_SET_VAR_ERROR, MYF(0), 0);
+ my_error(ER_EVENT_SET_VAR_ERROR, MYF(0), my_errno);
return ret;
}
@@ -797,6 +797,30 @@ static Sys_var_lexstring Sys_init_connect(
DEFAULT(""), &PLock_sys_init_connect, NOT_IN_BINLOG,
ON_CHECK(check_init_string));
+#ifdef HAVE_REPLICATION
+static bool check_master_connection(sys_var *self, THD *thd, set_var *var)
+{
+ LEX_STRING tmp;
+ tmp.str= var->save_result.string_value.str;
+ tmp.length= var->save_result.string_value.length;
+ if (!tmp.str || check_master_connection_name(&tmp))
+ {
+ my_error(ER_WRONG_ARGUMENTS, MYF(ME_JUST_WARNING),
+ var->var->name.str);
+ return true;
+ }
+ return false;
+}
+
+static Sys_var_session_lexstring Sys_default_master_connection(
+ "default_master_connection",
+ "Master connection to use for all slave variables and slave commands",
+ SESSION_ONLY(default_master_connection),
+ NO_CMD_LINE, IN_SYSTEM_CHARSET,
+ DEFAULT(""), MAX_CONNECTION_NAME, ON_CHECK(check_master_connection),
+ ON_UPDATE(0));
+#endif
+
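Being SESSION_ONLY, the variable is set per session and then scopes the
unnamed slave commands and variables; a sketch:

    SET @@default_master_connection = 'master1';
    SHOW SLAVE STATUS;  -- now reports on 'master1'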
static Sys_var_charptr Sys_init_file(
"init_file", "Read SQL commands from this file at startup",
READ_ONLY GLOBAL_VAR(opt_init_file),
@@ -1082,16 +1106,12 @@ static Sys_var_ulonglong Sys_max_binlog_stmt_cache_size(
static bool fix_max_binlog_size(sys_var *self, THD *thd, enum_var_type type)
{
mysql_bin_log.set_max_size(max_binlog_size);
-#ifdef HAVE_REPLICATION
- if (!max_relay_log_size)
- active_mi->rli.relay_log.set_max_size(max_binlog_size);
-#endif
return false;
}
static Sys_var_ulong Sys_max_binlog_size(
"max_binlog_size",
"Binary log will be rotated automatically when the size exceeds this "
- "value. Will also apply to relay logs if max_relay_log_size is 0",
+ "value.",
GLOBAL_VAR(max_binlog_size), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(IO_SIZE, 1024*1024L*1024L), DEFAULT(1024*1024L*1024L),
BLOCK_SIZE(IO_SIZE), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
@@ -1236,24 +1256,6 @@ static Sys_var_ulong Sys_max_prepared_stmt_count(
VALID_RANGE(0, 1024*1024), DEFAULT(16382), BLOCK_SIZE(1),
&PLock_prepared_stmt_count);
-static bool fix_max_relay_log_size(sys_var *self, THD *thd, enum_var_type type)
-{
-#ifdef HAVE_REPLICATION
- active_mi->rli.relay_log.set_max_size(max_relay_log_size ?
- max_relay_log_size: max_binlog_size);
-#endif
- return false;
-}
-static Sys_var_ulong Sys_max_relay_log_size(
- "max_relay_log_size",
- "If non-zero: relay log will be rotated automatically when the "
- "size exceeds this value; if zero: when the size "
- "exceeds max_binlog_size",
- GLOBAL_VAR(max_relay_log_size), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, 1024L*1024*1024), DEFAULT(0), BLOCK_SIZE(IO_SIZE),
- NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
- ON_UPDATE(fix_max_relay_log_size));
-
static Sys_var_ulong Sys_max_sort_length(
"max_sort_length",
"The number of bytes to use when sorting BLOB or TEXT values (only "
@@ -1428,7 +1430,7 @@ static bool fix_optimizer_search_depth(sys_var *self, THD *thd,
{
SV *sv= type == OPT_GLOBAL ? &global_system_variables : &thd->variables;
if (sv->optimizer_search_depth == MAX_TABLES+2)
- WARN_DEPRECATED(thd, 6, 0, "optimizer-search-depth=63",
+ WARN_DEPRECATED(thd, 10, 1, "optimizer-search-depth=63",
"a search depth less than 63");
return false;
}
@@ -2024,50 +2026,22 @@ static Sys_var_mybool Sys_master_verify_checksum(
static const char *replicate_events_marked_for_skip_names[]= {
"replicate", "filter_on_slave", "filter_on_master", 0
};
-static bool
-replicate_events_marked_for_skip_check(sys_var *self, THD *thd,
- set_var *var)
-{
- int thread_mask;
- DBUG_ENTER("sys_var_replicate_events_marked_for_skip_check");
- /* Slave threads must be stopped to change the variable. */
- mysql_mutex_lock(&LOCK_active_mi);
- lock_slave_threads(active_mi);
- init_thread_mask(&thread_mask, active_mi, 0 /*not inverse*/);
- unlock_slave_threads(active_mi);
- mysql_mutex_unlock(&LOCK_active_mi);
-
- if (thread_mask) // We refuse if any slave thread is running
- {
- my_error(ER_SLAVE_MUST_STOP, MYF(0));
- DBUG_RETURN(true);
- }
- DBUG_RETURN(false);
-}
bool
Sys_var_replicate_events_marked_for_skip::global_update(THD *thd, set_var *var)
{
- bool result;
- int thread_mask;
+ bool result= true; // Assume error
DBUG_ENTER("Sys_var_replicate_events_marked_for_skip::global_update");
- /* Slave threads must be stopped to change the variable. */
+ mysql_mutex_unlock(&LOCK_global_system_variables);
mysql_mutex_lock(&LOCK_active_mi);
- lock_slave_threads(active_mi);
- init_thread_mask(&thread_mask, active_mi, 0 /*not inverse*/);
- if (thread_mask) // We refuse if any slave thread is running
- {
- my_error(ER_SLAVE_MUST_STOP, MYF(0));
- result= true;
- }
- else
+ if (!master_info_index->give_error_if_slave_running())
result= Sys_var_enum::global_update(thd, var);
-
- unlock_slave_threads(active_mi);
mysql_mutex_unlock(&LOCK_active_mi);
+ mysql_mutex_lock(&LOCK_global_system_variables);
DBUG_RETURN(result);
}
+
static Sys_var_replicate_events_marked_for_skip Replicate_events_marked_for_skip
("replicate_events_marked_for_skip",
"Whether the slave should replicate events that were created with "
@@ -2078,8 +2052,7 @@ static Sys_var_replicate_events_marked_for_skip Replicate_events_marked_for_skip
"the slave).",
GLOBAL_VAR(opt_replicate_events_marked_for_skip), CMD_LINE(REQUIRED_ARG),
replicate_events_marked_for_skip_names, DEFAULT(RPL_SKIP_REPLICATE),
- NO_MUTEX_GUARD, NOT_IN_BINLOG,
- ON_CHECK(replicate_events_marked_for_skip_check));
+ NO_MUTEX_GUARD, NOT_IN_BINLOG);
#endif
@@ -3101,7 +3074,7 @@ static Sys_var_have Sys_have_geometry(
static Sys_var_have Sys_have_openssl(
"have_openssl", "have_openssl",
- READ_ONLY GLOBAL_VAR(have_ssl), NO_CMD_LINE);
+ READ_ONLY GLOBAL_VAR(have_openssl), NO_CMD_LINE);
static Sys_var_have Sys_have_profiling(
"have_profiling", "have_profiling",
@@ -3254,71 +3227,18 @@ static Sys_var_mybool Sys_relay_log_recovery(
"processed",
GLOBAL_VAR(relay_log_recovery), CMD_LINE(OPT_ARG), DEFAULT(FALSE));
-bool Sys_var_rpl_filter::do_check(THD *thd, set_var *var)
-{
- bool status;
-
- /*
- We must not be holding LOCK_global_system_variables here, otherwise we can
- deadlock with THD::init() which is invoked from within the slave threads
- with opposite locking order.
- */
- mysql_mutex_assert_not_owner(&LOCK_global_system_variables);
-
- mysql_mutex_lock(&LOCK_active_mi);
- mysql_mutex_lock(&active_mi->rli.run_lock);
-
- status= active_mi->rli.slave_running;
- mysql_mutex_unlock(&active_mi->rli.run_lock);
- mysql_mutex_unlock(&LOCK_active_mi);
-
- if (status)
- my_error(ER_SLAVE_MUST_STOP, MYF(0));
- else
- status= Sys_var_charptr::do_string_check(thd, var, charset(thd));
-
- return status;
-}
-
-void Sys_var_rpl_filter::lock(void)
+bool Sys_var_rpl_filter::global_update(THD *thd, set_var *var)
{
- /*
- Starting a slave thread causes the new thread to attempt to
- acquire LOCK_global_system_variables (in THD::init) while
- LOCK_active_mi is being held by the thread that initiated
- the process. In order to not violate the lock order, unlock
- LOCK_global_system_variables before grabbing LOCK_active_mi.
- */
- mysql_mutex_unlock(&LOCK_global_system_variables);
+ bool result= true; // Assume error
+ mysql_mutex_unlock(&LOCK_global_system_variables);
mysql_mutex_lock(&LOCK_active_mi);
- mysql_mutex_lock(&active_mi->rli.run_lock);
-}
-
-void Sys_var_rpl_filter::unlock(void)
-{
- mysql_mutex_unlock(&active_mi->rli.run_lock);
+ if (!master_info_index->give_error_if_slave_running())
+ result= set_filter_value(var->save_result.string_value.str);
mysql_mutex_unlock(&LOCK_active_mi);
-
mysql_mutex_lock(&LOCK_global_system_variables);
-}
-
-bool Sys_var_rpl_filter::global_update(THD *thd, set_var *var)
-{
- bool slave_running, status= false;
-
- lock();
-
- if (! (slave_running= active_mi->rli.slave_running))
- status= set_filter_value(var->save_result.string_value.str);
-
- if (slave_running)
- my_error(ER_SLAVE_MUST_STOP, MYF(0));
-
- unlock();
-
- return slave_running || status;
+ return result;
}
bool Sys_var_rpl_filter::set_filter_value(const char *value)
@@ -3356,8 +3276,6 @@ uchar *Sys_var_rpl_filter::global_value_ptr(THD *thd, LEX_STRING *base)
tmp.length(0);
- lock();
-
switch (opt_id) {
case OPT_REPLICATE_DO_DB:
rpl_filter->get_do_db(&tmp);
@@ -3379,8 +3297,6 @@ uchar *Sys_var_rpl_filter::global_value_ptr(THD *thd, LEX_STRING *base)
break;
}
- unlock();
-
return (uchar *) thd->strmake(tmp.ptr(), tmp.length());
}
@@ -3430,72 +3346,105 @@ static Sys_var_charptr Sys_slave_load_tmpdir(
READ_ONLY GLOBAL_VAR(slave_load_tmpdir), CMD_LINE(REQUIRED_ARG),
IN_FS_CHARSET, DEFAULT(0));
-static bool fix_slave_net_timeout(sys_var *self, THD *thd, enum_var_type type)
-{
- DEBUG_SYNC(thd, "fix_slave_net_timeout");
-
- mysql_mutex_unlock(&LOCK_global_system_variables);
- mysql_mutex_lock(&LOCK_active_mi);
- DBUG_PRINT("info", ("slave_net_timeout=%u mi->heartbeat_period=%.3f",
- slave_net_timeout,
- (active_mi? active_mi->heartbeat_period : 0.0)));
- if (active_mi && slave_net_timeout < active_mi->heartbeat_period)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX,
- ER(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX));
- mysql_mutex_unlock(&LOCK_active_mi);
- mysql_mutex_lock(&LOCK_global_system_variables);
- return false;
-}
static Sys_var_uint Sys_slave_net_timeout(
"slave_net_timeout", "Number of seconds to wait for more data "
- "from a master/slave connection before aborting the read",
+ "from any master/slave connection before aborting the read",
GLOBAL_VAR(slave_net_timeout), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(1, LONG_TIMEOUT), DEFAULT(SLAVE_NET_TIMEOUT), BLOCK_SIZE(1),
NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
- ON_UPDATE(fix_slave_net_timeout));
+ ON_UPDATE(0));
-static bool check_slave_skip_counter(sys_var *self, THD *thd, set_var *var)
+
+/*
+ Access a multi_source variable
+ Return 0 + warning if it doesn't exist
+*/
+
+uint Sys_var_multi_source_ulong::
+get_master_info_uint_value(THD *thd, ptrdiff_t offset)
{
- bool result= false;
+ Master_info *mi;
+ uint res= 0; // Default value
mysql_mutex_lock(&LOCK_active_mi);
- mysql_mutex_lock(&active_mi->rli.run_lock);
- if (active_mi->rli.slave_running)
+ mi= master_info_index->
+ get_master_info(&thd->variables.default_master_connection,
+ MYSQL_ERROR::WARN_LEVEL_WARN);
+ if (mi)
{
- my_message(ER_SLAVE_MUST_STOP, ER(ER_SLAVE_MUST_STOP), MYF(0));
- result= true;
+ mysql_mutex_lock(&mi->rli.data_lock);
+ res= *((uint*) (((uchar*) mi) + master_info_offset));
+ mysql_mutex_unlock(&mi->rli.data_lock);
}
- mysql_mutex_unlock(&active_mi->rli.run_lock);
- mysql_mutex_unlock(&LOCK_active_mi);
- return result;
+ mysql_mutex_unlock(&LOCK_active_mi);
+ return res;
}
-static bool fix_slave_skip_counter(sys_var *self, THD *thd, enum_var_type type)
+
+
+bool update_multi_source_variable(sys_var *self_var, THD *thd,
+ enum_var_type type)
{
- mysql_mutex_unlock(&LOCK_global_system_variables);
+ Sys_var_multi_source_ulong *self= (Sys_var_multi_source_ulong*) self_var;
+ bool result= true;
+ Master_info *mi;
+
mysql_mutex_lock(&LOCK_active_mi);
- mysql_mutex_lock(&active_mi->rli.run_lock);
- /*
- The following test should normally never be true as we test this
- in the check function; To be safe against multiple
- SQL_SLAVE_SKIP_COUNTER request, we do the check anyway
- */
- if (!active_mi->rli.slave_running)
+ mi= master_info_index->
+ get_master_info(&thd->variables.default_master_connection,
+ MYSQL_ERROR::WARN_LEVEL_ERROR);
+ if (mi)
{
- mysql_mutex_lock(&active_mi->rli.data_lock);
- active_mi->rli.slave_skip_counter= sql_slave_skip_counter;
- mysql_mutex_unlock(&active_mi->rli.data_lock);
+ mysql_mutex_lock(&mi->rli.run_lock);
+ mysql_mutex_lock(&mi->rli.data_lock);
+ result= self->update_variable(thd, mi);
+ mysql_mutex_unlock(&mi->rli.data_lock);
+ mysql_mutex_unlock(&mi->rli.run_lock);
}
- mysql_mutex_unlock(&active_mi->rli.run_lock);
mysql_mutex_unlock(&LOCK_active_mi);
- mysql_mutex_lock(&LOCK_global_system_variables);
- return 0;
+ return result;
+}
+
+static bool update_slave_skip_counter(sys_var *self, THD *thd, Master_info *mi)
+{
+ if (mi->rli.slave_running)
+ {
+ my_error(ER_SLAVE_MUST_STOP, MYF(0), mi->connection_name.length,
+ mi->connection_name.str);
+ return true;
+ }
+ /* The value was stored temporarily in thd */
+ mi->rli.slave_skip_counter= thd->variables.slave_skip_counter;
+ return false;
+}
+
+
+static Sys_var_multi_source_ulong
+Sys_slave_skip_counter("sql_slave_skip_counter",
+ "Skip the next N events from the master log",
+ SESSION_VAR(slave_skip_counter),
+ NO_CMD_LINE,
+ my_offsetof(Master_info, rli.slave_skip_counter),
+ VALID_RANGE(0, UINT_MAX), DEFAULT(0), BLOCK_SIZE(1),
+ ON_UPDATE(update_slave_skip_counter));
+
+
+static bool update_max_relay_log_size(sys_var *self, THD *thd, Master_info *mi)
+{
+ mi->rli.max_relay_log_size= thd->variables.max_relay_log_size;
+ mi->rli.relay_log.set_max_size(mi->rli.max_relay_log_size);
+ return false;
}
-static Sys_var_uint Sys_slave_skip_counter(
- "sql_slave_skip_counter", "sql_slave_skip_counter",
- GLOBAL_VAR(sql_slave_skip_counter), NO_CMD_LINE,
- VALID_RANGE(0, UINT_MAX), DEFAULT(0), BLOCK_SIZE(1),
- NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(check_slave_skip_counter),
- ON_UPDATE(fix_slave_skip_counter));
+
+static Sys_var_multi_source_ulong
+Sys_max_relay_log_size( "max_relay_log_size",
+ "relay log will be rotated automatically when the "
+ "size exceeds this value. If 0 are startup, it's "
+ "set to max_binlog_size",
+ SESSION_VAR(max_relay_log_size),
+ CMD_LINE(REQUIRED_ARG),
+ my_offsetof(Master_info, rli.max_relay_log_size),
+ VALID_RANGE(0, 1024L*1024*1024), DEFAULT(0),
+ BLOCK_SIZE(IO_SIZE),
+ ON_UPDATE(update_max_relay_log_size));
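Because these variables resolve through @@default_master_connection,
each update targets a single Master_info; a hypothetical session:

    SET @@default_master_connection = 'master1';
    SET GLOBAL sql_slave_skip_counter = 1;  -- skips one event on 'master1' only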
static Sys_var_charptr Sys_slave_skip_errors(
"slave_skip_errors", "Tells the slave thread to continue "
@@ -3791,6 +3740,15 @@ static Sys_var_ulong Sys_progress_report_time(
SESSION_VAR(progress_report_time), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, UINT_MAX), DEFAULT(56), BLOCK_SIZE(1));
+const char *use_stat_tables_modes[] =
+ {"NEVER", "COMPLEMENTARY", "PREFERABLY", 0};
+static Sys_var_enum Sys_optimizer_use_stat_tables(
+ "use_stat_tables",
+ "Specifies how to use system statistics tables. Possible values are "
+ "NEVER, COMPLEMENTARY, PREVERABLY",
+ SESSION_VAR(use_stat_tables), CMD_LINE(REQUIRED_ARG),
+ use_stat_tables_modes, DEFAULT(0));
+
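The enum accepts the three modes listed in use_stat_tables_modes:

    SET SESSION use_stat_tables = 'PREFERABLY';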
static Sys_var_mybool Sys_no_thread_alarm(
"debug_no_thread_alarm",
"Disable system thread alarm calls. Disabling it may be useful "
diff --git a/sql/sys_vars.h b/sql/sys_vars.h
index 95a8e45f7c3..1729dcefd63 100644
--- a/sql/sys_vars.h
+++ b/sql/sys_vars.h
@@ -566,11 +566,13 @@ public:
option.var_type= GET_STR;
}
+ bool do_check(THD *thd, set_var *var)
+ {
+ return Sys_var_charptr::do_string_check(thd, var, charset(thd));
+ }
bool check_update_type(Item_result type)
{ return type != STRING_RESULT; }
- bool do_check(THD *thd, set_var *var);
-
void session_save_default(THD *thd, set_var *var)
{ DBUG_ASSERT(FALSE); }
@@ -588,8 +590,6 @@ public:
protected:
uchar *global_value_ptr(THD *thd, LEX_STRING *base);
bool set_filter_value(const char *value);
- void lock(void);
- void unlock(void);
};
/**
@@ -633,6 +633,93 @@ public:
}
};
+
+/*
+ A LEX_STRING stored only in thd->variables
+ Only to be used for small buffers
+*/
+
+class Sys_var_session_lexstring: public sys_var
+{
+ size_t max_length;
+public:
+ Sys_var_session_lexstring(const char *name_arg,
+ const char *comment, int flag_args,
+ ptrdiff_t off, size_t size, CMD_LINE getopt,
+ enum charset_enum is_os_charset_arg,
+ const char *def_val, size_t max_length_arg,
+ on_check_function on_check_func=0,
+ on_update_function on_update_func=0)
+ : sys_var(&all_sys_vars, name_arg, comment, flag_args, off, getopt.id,
+ getopt.arg_type, SHOW_CHAR, (intptr)def_val,
+ 0, VARIABLE_NOT_IN_BINLOG, on_check_func, on_update_func,
+ 0),max_length(max_length_arg)
+ {
+ option.var_type= GET_NO_ARG;
+ SYSVAR_ASSERT(scope() == ONLY_SESSION)
+ *const_cast<SHOW_TYPE*>(&show_val_type)= SHOW_LEX_STRING;
+ }
+ bool do_check(THD *thd, set_var *var)
+ {
+ char buff[STRING_BUFFER_USUAL_SIZE];
+ String str(buff, sizeof(buff), system_charset_info), *res;
+
+ if (!(res=var->value->val_str(&str)))
+ {
+ var->save_result.string_value.str= 0; /* NULL */
+ var->save_result.string_value.length= 0;
+ }
+ else
+ {
+ if (res->length() > max_length)
+ {
+ my_error(ER_WRONG_STRING_LENGTH, MYF(0),
+ res->ptr(), name.str, (int) max_length);
+ return true;
+ }
+ var->save_result.string_value.str= thd->strmake(res->ptr(),
+ res->length());
+ var->save_result.string_value.length= res->length();
+ }
+ return false;
+ }
+ bool session_update(THD *thd, set_var *var)
+ {
+ LEX_STRING *tmp= &session_var(thd, LEX_STRING);
+ tmp->length= var->save_result.string_value.length;
+ /* Store as \0 terminated string (just to be safe) */
+ strmake(tmp->str, var->save_result.string_value.str, tmp->length);
+ return false;
+ }
+ bool global_update(THD *thd, set_var *var)
+ {
+ DBUG_ASSERT(FALSE);
+ return false;
+ }
+ void session_save_default(THD *thd, set_var *var)
+ {
+ char *ptr= (char*)(intptr)option.def_value;
+ var->save_result.string_value.str= ptr;
+ var->save_result.string_value.length= strlen(ptr);
+ }
+ void global_save_default(THD *thd, set_var *var)
+ {
+ DBUG_ASSERT(FALSE);
+ }
+ uchar *session_value_ptr(THD *thd, LEX_STRING *base)
+ {
+ return (uchar*) &session_var(thd, LEX_STRING);
+ }
+ uchar *global_value_ptr(THD *thd, LEX_STRING *base)
+ {
+ DBUG_ASSERT(FALSE);
+ return NULL;
+ }
+ bool check_update_type(Item_result type)
+ { return type != STRING_RESULT; }
+};
+
+
#ifndef DBUG_OFF
/**
@@session.dbug and @@global.dbug variables.
@@ -1389,6 +1476,7 @@ public:
};
#endif /* defined(ENABLED_DEBUG_SYNC) */
+
/**
The class for bit variables - a variant of boolean that stores the value
in a bit.
@@ -1854,25 +1942,82 @@ public:
const char *comment, int flag_args, ptrdiff_t off, size_t size,
CMD_LINE getopt,
const char *values[], uint def_val, PolyLock *lock,
- enum binlog_status_enum binlog_status_arg,
- on_check_function on_check_func)
+ enum binlog_status_enum binlog_status_arg)
:Sys_var_enum(name_arg, comment, flag_args, off, size, getopt,
- values, def_val, lock, binlog_status_arg, on_check_func)
+ values, def_val, lock, binlog_status_arg)
{}
bool global_update(THD *thd, set_var *var);
};
-/****************************************************************************
- Used templates
-****************************************************************************/
-
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-template class List<set_var_base>;
-template class List_iterator_fast<set_var_base>;
-template class Sys_var_integer<int, GET_INT, SHOW_SINT>;
-template class Sys_var_integer<uint, GET_UINT, SHOW_INT>;
-template class Sys_var_integer<ulong, GET_ULONG, SHOW_LONG>;
-template class Sys_var_integer<ha_rows, GET_HA_ROWS, SHOW_HA_ROWS>;
-template class Sys_var_integer<ulonglong, GET_ULL, SHOW_LONGLONG>;
-#endif
+/*
+ Class for handling multi-source replication variables
+ Variable values are stored in Master_info, but to make it possible to
+ access a variable without locks we also store it in thd->variables.
+ These can be used as GLOBAL or SESSION, but both point to the same
+ variable. This is to make things compatible with MySQL 5.5 where variables
+ like sql_slave_skip_counter are GLOBAL.
+*/
+
+class Sys_var_multi_source_ulong;
+class Master_info;
+
+typedef bool (*on_multi_source_update_function)(sys_var *self, THD *thd,
+ Master_info *mi);
+bool update_multi_source_variable(sys_var *self,
+ THD *thd, enum_var_type type);
+
+
+class Sys_var_multi_source_ulong :public Sys_var_ulong
+{
+ ptrdiff_t master_info_offset;
+ on_multi_source_update_function update_multi_source_variable_func;
+public:
+ Sys_var_multi_source_ulong(const char *name_arg,
+ const char *comment, int flag_args,
+ ptrdiff_t off, size_t size,
+ CMD_LINE getopt,
+ ptrdiff_t master_info_offset_arg,
+ uint min_val, uint max_val, uint def_val,
+ uint block_size,
+ on_multi_source_update_function on_update_func)
+ :Sys_var_ulong(name_arg, comment, flag_args, off, size,
+ getopt, min_val, max_val, def_val, block_size,
+ 0, VARIABLE_NOT_IN_BINLOG, 0, update_multi_source_variable),
+ master_info_offset(master_info_offset_arg),
+ update_multi_source_variable_func(on_update_func)
+ {
+ }
+ bool session_update(THD *thd, set_var *var)
+ {
+ session_var(thd, uint)= (uint) (var->save_result.ulonglong_value);
+ /* Value should be moved to multi_master in on_update_func */
+ return false;
+ }
+ bool global_update(THD *thd, set_var *var)
+ {
+ return session_update(thd, var);
+ }
+ void session_save_default(THD *thd, set_var *var)
+ {
+ /* Use value given in variable declaration */
+ global_save_default(thd, var);
+ }
+ uchar *session_value_ptr(THD *thd,LEX_STRING *base)
+ {
+ uint *tmp, res;
+ tmp= (uint*) (((uchar*)&(thd->variables)) + offset);
+ res= get_master_info_uint_value(thd, master_info_offset);
+ *tmp= res;
+ return (uchar*) tmp;
+ }
+ uchar *global_value_ptr(THD *thd, LEX_STRING *base)
+ {
+ return session_value_ptr(thd, base);
+ }
+ uint get_master_info_uint_value(THD *thd, ptrdiff_t offset);
+ bool update_variable(THD *thd, Master_info *mi)
+ {
+ return update_multi_source_variable_func(this, thd, mi);
+ }
+};
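A minimal standalone model of the mirroring scheme described above (the struct
names below are stand-ins, not the actual MariaDB types): each read refreshes
the per-THD copy from the authoritative Master_info value, which is why
readers need no replication locks.

    struct MasterInfoModel  { unsigned sql_slave_skip_counter; };
    struct SessionVarsModel { unsigned sql_slave_skip_counter; };

    /* Mirror of session_value_ptr(): refresh the cached session copy,
       then return it. */
    unsigned read_skip_counter(SessionVarsModel *session_vars,
                               const MasterInfoModel *mi)
    {
      session_vars->sql_slave_skip_counter= mi->sql_slave_skip_counter;
      return session_vars->sql_slave_skip_counter;
    }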
diff --git a/sql/table.cc b/sql/table.cc
index fba45f421ed..a011fa845a7 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -39,6 +39,7 @@
#include "my_bit.h"
#include "sql_select.h"
#include "sql_derived.h"
+#include "sql_statistics.h"
#include "mdl.h" // MDL_wait_for_graph_visitor
/* INFORMATION_SCHEMA name */
@@ -306,7 +307,7 @@ TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, char *key,
path_length= build_table_filename(path, sizeof(path) - 1,
table_list->db,
table_list->table_name, "", 0);
- init_sql_alloc(&mem_root, TABLE_ALLOC_BLOCK_SIZE, 0);
+ init_sql_alloc(&mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(0));
if (multi_alloc_root(&mem_root,
&share, sizeof(*share),
&key_buff, key_length,
@@ -339,6 +340,8 @@ TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, char *key,
share->free_tables.empty();
share->m_flush_tickets.empty();
+ init_sql_alloc(&share->stats_cb.mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(0));
+
memcpy((char*) &share->mem_root, (char*) &mem_root, sizeof(mem_root));
mysql_mutex_init(key_TABLE_SHARE_LOCK_ha_data,
&share->LOCK_ha_data, MY_MUTEX_INIT_FAST);
@@ -378,7 +381,12 @@ void init_tmp_table_share(THD *thd, TABLE_SHARE *share, const char *key,
DBUG_PRINT("enter", ("table: '%s'.'%s'", key, table_name));
bzero((char*) share, sizeof(*share));
- init_sql_alloc(&share->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0);
+ /*
+ This can't be MY_THREAD_SPECIFIC for slaves as they are freed
+ during cleanup() from Relay_log_info::close_temporary_tables()
+ */
+ init_sql_alloc(&share->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0,
+ MYF(thd->slave_thread ? 0 : MY_THREAD_SPECIFIC));
share->table_category= TABLE_CATEGORY_TEMPORARY;
share->tmp_table= INTERNAL_TMP_TABLE;
share->db.str= (char*) key;
@@ -419,6 +427,14 @@ void TABLE_SHARE::destroy()
uint idx;
KEY *info_it;
+ if (tmp_table == NO_TMP_TABLE)
+ mysql_mutex_lock(&LOCK_ha_data);
+ free_root(&stats_cb.mem_root, MYF(0));
+ stats_cb.stats_can_be_read= FALSE;
+ stats_cb.stats_is_read= FALSE;
+ if (tmp_table == NO_TMP_TABLE)
+ mysql_mutex_unlock(&LOCK_ha_data);
+
/* The mutex is initialized only for shares that are part of the TDC */
if (tmp_table == NO_TMP_TABLE)
mysql_mutex_destroy(&LOCK_ha_data);
@@ -544,6 +560,13 @@ inline bool is_system_table_name(const char *name, uint length)
my_tolower(ci, name[2]) == 'm' &&
my_tolower(ci, name[3]) == 'e') ||
+ /* one of the mysql.*_stats tables */
+ (my_tolower(ci, name[length-5]) == 's' &&
+ my_tolower(ci, name[length-4]) == 't' &&
+ my_tolower(ci, name[length-3]) == 'a' &&
+ my_tolower(ci, name[length-2]) == 't' &&
+ my_tolower(ci, name[length-1]) == 's') ||
+
/* mysql.event table */
(my_tolower(ci, name[0]) == 'e' &&
my_tolower(ci, name[1]) == 'v' &&
@@ -753,7 +776,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
uchar forminfo[288];
uchar *record;
uchar *disk_buff, *strpos, *null_flags, *null_pos;
- ulong pos, record_offset;
+ ulong pos, record_offset;
ulong *rec_per_key= NULL;
ulong rec_buff_length;
handler *handler_file= 0;
@@ -1251,6 +1274,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
com_length= uint2korr(forminfo+284);
vcol_screen_length= uint2korr(forminfo+286);
share->vfields= 0;
+ share->default_fields= 0;
share->stored_fields= share->fields;
if (forminfo[46] != (uchar)255)
{
@@ -1582,8 +1606,6 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
if (reg_field->unireg_check == Field::NEXT_NUMBER)
share->found_next_number_field= field_ptr;
- if (share->timestamp_field == reg_field)
- share->timestamp_field_offset= i;
if (use_hash)
{
@@ -1605,6 +1627,9 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
if (share->stored_rec_length>=recpos)
share->stored_rec_length= recpos-1;
}
+ if (reg_field->has_insert_default_function() ||
+ reg_field->has_update_default_function())
+ ++share->default_fields;
}
*field_ptr=0; // End marker
/* Sanity checks: */
@@ -2319,7 +2344,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
uint records, i, bitmap_size;
bool error_reported= FALSE;
uchar *record, *bitmaps;
- Field **field_ptr, **vfield_ptr;
+ Field **field_ptr, **UNINIT_VAR(vfield_ptr), **UNINIT_VAR(dfield_ptr);
uint8 save_context_analysis_only= thd->lex->context_analysis_only;
DBUG_ENTER("open_table_from_share");
DBUG_PRINT("enter",("name: '%s.%s' form: 0x%lx", share->db.str,
@@ -2334,7 +2359,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
outparam->db_stat= db_stat;
outparam->write_row_record= NULL;
- init_sql_alloc(&outparam->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0);
+ init_sql_alloc(&outparam->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(0));
if (outparam->alias.copy(alias, strlen(alias), table_alias_charset))
goto err;
@@ -2423,9 +2448,6 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
if (share->found_next_number_field)
outparam->found_next_number_field=
outparam->field[(uint) (share->found_next_number_field - share->field)];
- if (share->timestamp_field)
- outparam->timestamp_field= (Field_timestamp*) outparam->field[share->timestamp_field_offset];
-
/* Fix key->name and key_part->field */
if (share->key_parts)
@@ -2476,11 +2498,9 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
}
/*
- Process virtual columns, if any.
+ Process virtual and default columns, if any.
*/
- if (!share->vfields)
- outparam->vfield= NULL;
- else
+ if (share->vfields)
{
if (!(vfield_ptr = (Field **) alloc_root(&outparam->mem_root,
(uint) ((share->vfields+1)*
@@ -2488,10 +2508,24 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
goto err;
outparam->vfield= vfield_ptr;
+ }
+ if (share->default_fields)
+ {
+ if (!(dfield_ptr = (Field **) alloc_root(&outparam->mem_root,
+ (uint) ((share->default_fields+1)*
+ sizeof(Field*)))))
+ goto err;
+
+ outparam->default_field= dfield_ptr;
+ }
+
+ if (share->vfields || share->default_fields)
+ {
+ /* Reuse the same loop both for virtual and default fields. */
for (field_ptr= outparam->field; *field_ptr; field_ptr++)
{
- if ((*field_ptr)->vcol_info)
+ if (share->vfields && (*field_ptr)->vcol_info)
{
if (unpack_vcol_info_from_frm(thd,
&outparam->mem_root,
@@ -2505,8 +2539,15 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
}
*(vfield_ptr++)= *field_ptr;
}
+ if (share->default_fields &&
+ ((*field_ptr)->has_insert_default_function() ||
+ (*field_ptr)->has_update_default_function()))
+ *(dfield_ptr++)= *field_ptr;
}
- *vfield_ptr= 0; // End marker
+ if (share->vfields)
+ *vfield_ptr= 0; // End marker
+ if (share->default_fields)
+ *dfield_ptr= 0; // End marker
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
@@ -3932,9 +3973,6 @@ void TABLE::init(THD *thd, TABLE_LIST *tl)
DBUG_ASSERT(!auto_increment_field_not_null);
auto_increment_field_not_null= FALSE;
- if (timestamp_field)
- timestamp_field_type= timestamp_field->get_auto_set_type();
-
pos_in_table_list= tl;
clear_column_bitmaps();
@@ -5874,6 +5912,51 @@ void TABLE::mark_virtual_columns_for_write(bool insert_fl)
/**
+ Check if the table has a default function, either for an INSERT-like or an
+ UPDATE-like operation
+ @retval true there is a default function
+ @retval false there is no default function
+*/
+
+bool TABLE::has_default_function(bool is_update)
+{
+ Field **dfield_ptr, *dfield;
+ bool res= false;
+ for (dfield_ptr= default_field; *dfield_ptr; dfield_ptr++)
+ {
+ dfield= (*dfield_ptr);
+ if (is_update)
+ res= dfield->has_update_default_function();
+ else
+ res= dfield->has_insert_default_function();
+ if (res)
+ return res;
+ }
+ return res;
+}
+
+
+/**
+ Add all fields that have a default function to the table write set.
+*/
+
+void TABLE::mark_default_fields_for_write()
+{
+ Field **dfield_ptr, *dfield;
+ enum_sql_command cmd= in_use->lex->sql_command;
+ for (dfield_ptr= default_field; *dfield_ptr; dfield_ptr++)
+ {
+ dfield= (*dfield_ptr);
+ if (((sql_command_flags[cmd] & CF_INSERTS_DATA) &&
+ dfield->has_insert_default_function()) ||
+ ((sql_command_flags[cmd] & CF_UPDATES_DATA) &&
+ dfield->has_update_default_function()))
+ bitmap_set_bit(write_set, dfield->field_index);
+ }
+}
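For context, a column declared TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE
CURRENT_TIMESTAMP carries both kinds of default function, so whether it joins
the write set depends on the statement. A standalone sketch of the test above
(the flag values and struct below are simplified stand-ins):

    enum { CF_INSERTS_DATA= 1, CF_UPDATES_DATA= 2 }; /* stand-in flag bits */

    struct FieldModel
    {
      bool insert_default;  /* has_insert_default_function() */
      bool update_default;  /* has_update_default_function() */
    };

    /* True if the field must be added to the write set for a statement
       with the given command flags - the same predicate as the loop above. */
    bool needs_write(unsigned cmd_flags, const FieldModel &f)
    {
      return ((cmd_flags & CF_INSERTS_DATA) && f.insert_default) ||
             ((cmd_flags & CF_UPDATES_DATA) && f.update_default);
    }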
+
+
+/**
@brief
Allocate space for keys
@@ -5990,6 +6073,7 @@ bool TABLE::add_tmp_key(uint key, uint key_parts,
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
keyinfo->flags= HA_GENERATED_KEY;
keyinfo->ext_key_flags= keyinfo->flags;
+ keyinfo->is_statistics_from_stat_tables= FALSE;
if (unique)
keyinfo->flags|= HA_NOSAME;
sprintf(buf, "key%i", key);
@@ -6000,6 +6084,8 @@ bool TABLE::add_tmp_key(uint key, uint key_parts,
if (!keyinfo->rec_per_key)
return TRUE;
bzero(keyinfo->rec_per_key, sizeof(ulong)*key_parts);
+ keyinfo->read_stats= NULL;
+ keyinfo->collected_stats= NULL;
for (i= 0; i < key_parts; i++)
{
@@ -6482,6 +6568,56 @@ int update_virtual_fields(THD *thd, TABLE *table,
DBUG_RETURN(0);
}
+
+/**
+ Update all DEFAULT and/or ON UPDATE fields.
+
+ @details
+ Compute and set the default value of all fields with a default function.
+ There are two kinds of default functions - one is used for INSERT-like
+ operations, the other for UPDATE-like operations. Depending on the field
+ definition and the current operation one or the other kind of update
+ function is evaluated.
+
+ @retval
+ 0 Success
+ @retval
+ >0 Error occurred when storing a default field value
+*/
+
+int TABLE::update_default_fields()
+{
+ DBUG_ENTER("update_default_fields");
+ Field **dfield_ptr, *dfield;
+ int res= 0;
+ enum_sql_command cmd= in_use->lex->sql_command;
+
+ DBUG_ASSERT(default_field);
+
+ /* Iterate over the fields with default functions in the table */
+ for (dfield_ptr= default_field; *dfield_ptr; dfield_ptr++)
+ {
+ dfield= (*dfield_ptr);
+ /*
+ If an explicit default value for a field overrides the default,
+ do not update the field with its automatic default value.
+ */
+ if (!(dfield->flags & HAS_EXPLICIT_VALUE))
+ {
+ if (sql_command_flags[cmd] & CF_INSERTS_DATA)
+ res= dfield->evaluate_insert_default_function();
+ if (sql_command_flags[cmd] & CF_UPDATES_DATA)
+ res= dfield->evaluate_update_default_function();
+ if (res)
+ DBUG_RETURN(res);
+ }
+ /* Unset the explicit default flag for the next record. */
+ dfield->flags&= ~HAS_EXPLICIT_VALUE;
+ }
+ DBUG_RETURN(res);
+}
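The HAS_EXPLICIT_VALUE handling is the subtle part: a field assigned
explicitly in the statement keeps its value, only untouched fields get the
automatic default, and the flag is cleared so the next record starts clean.
A standalone model of that loop (simplified types, single callback):

    #include <cstddef>

    struct DefaultFieldModel
    {
      bool has_explicit_value;   /* HAS_EXPLICIT_VALUE analogue */
      int (*evaluate_default)(); /* evaluate_*_default_function() analogue */
    };

    int update_defaults(DefaultFieldModel *fields, size_t n)
    {
      int res= 0;
      for (size_t i= 0; i < n; i++)
      {
        if (!fields[i].has_explicit_value &&
            (res= fields[i].evaluate_default()))
          return res;                        /* propagate the store error */
        fields[i].has_explicit_value= false; /* reset for the next record */
      }
      return 0;
    }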
+
+
/*
@brief Reset const_table flag
@@ -6693,6 +6829,7 @@ int TABLE_LIST::fetch_number_of_rows()
{
table->file->stats.records= ((select_union*)derived->result)->records;
set_if_bigger(table->file->stats.records, 2);
+ table->used_stat_records= table->file->stats.records;
}
else
error= table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
@@ -6789,11 +6926,11 @@ uint TABLE_SHARE::actual_n_key_parts(THD *thd)
}
-/*****************************************************************************
-** Instansiate templates
-*****************************************************************************/
+double KEY::actual_rec_per_key(uint i)
+{
+ if (rec_per_key == 0)
+ return 0;
+ return (is_statistics_from_stat_tables ?
+ read_stats->get_avg_frequency(i) : (double) rec_per_key[i]);
+}
-#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
-template class List<String>;
-template class List_iterator<String>;
-#endif
diff --git a/sql/table.h b/sql/table.h
index c21b01f53f7..1a567ae75d1 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -29,6 +29,7 @@
#include "handler.h" /* row_type, ha_choice, handler */
#include "mysql_com.h" /* enum_field_types */
#include "thr_lock.h" /* thr_lock_type */
+#include "filesort_utils.h"
/* Structs that defines the TABLE */
@@ -45,6 +46,7 @@ struct TABLE_LIST;
class ACL_internal_schema_access;
class ACL_internal_table_access;
class Field;
+class Table_statistics;
/*
Used to identify NESTED_JOIN structures within a join (applicable only to
@@ -306,11 +308,13 @@ enum enum_vcol_update_mode
VCOL_UPDATE_ALL
};
-typedef struct st_filesort_info
+class Filesort_info
{
+ /// Buffer for sorting keys.
+ Filesort_buffer filesort_buffer;
+
+public:
IO_CACHE *io_cache; /* If sorted through filesort */
- uchar **sort_keys; /* Buffer for sorting keys */
- uint keys; /* Number of key pointers in buffer */
uchar *buffpek; /* Buffer for buffpek structures */
uint buffpek_len; /* Max number of buffpeks in the buffer */
uchar *addon_buf; /* Pointer to a buffer if sorted with fields */
@@ -319,28 +323,40 @@ typedef struct st_filesort_info
void (*unpack)(struct st_sort_addon_field *, uchar *, uchar *); /* To unpack back */
uchar *record_pointers; /* If sorted in memory */
ha_rows found_records; /* How many records in sort */
-} FILESORT_INFO;
+ /** Sort filesort_buffer */
+ void sort_buffer(Sort_param *param, uint count)
+ { filesort_buffer.sort_buffer(param, count); }
-/*
- Values in this enum are used to indicate how a tables TIMESTAMP field
- should be treated. It can be set to the current timestamp on insert or
- update or both.
- WARNING: The values are used for bit operations. If you change the
- enum, you must keep the bitwise relation of the values. For example:
- (int) TIMESTAMP_AUTO_SET_ON_BOTH must be equal to
- (int) TIMESTAMP_AUTO_SET_ON_INSERT | (int) TIMESTAMP_AUTO_SET_ON_UPDATE.
- We use an enum here so that the debugger can display the value names.
-*/
-enum timestamp_auto_set_type
-{
- TIMESTAMP_NO_AUTO_SET= 0, TIMESTAMP_AUTO_SET_ON_INSERT= 1,
- TIMESTAMP_AUTO_SET_ON_UPDATE= 2, TIMESTAMP_AUTO_SET_ON_BOTH= 3
+ /**
+ Accessors for the wrapped Filesort_buffer.
+ */
+ uchar *get_record_buffer(uint idx)
+ { return filesort_buffer.get_record_buffer(idx); }
+
+ uchar **get_sort_keys()
+ { return filesort_buffer.get_sort_keys(); }
+
+ uchar **alloc_sort_buffer(uint num_records, uint record_length)
+ { return filesort_buffer.alloc_sort_buffer(num_records, record_length); }
+
+ bool check_sort_buffer_properties(uint num_records, uint record_length)
+ {
+ return filesort_buffer.check_sort_buffer_properties(num_records,
+ record_length);
+ }
+
+ void free_sort_buffer()
+ { filesort_buffer.free_sort_buffer(); }
+
+ void init_record_pointers()
+ { filesort_buffer.init_record_pointers(); }
+
+ size_t sort_buffer_size() const
+ { return filesort_buffer.sort_buffer_size(); }
};
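A hypothetical call site (TABLE, Sort_param, param and count come from the
surrounding server code, so this fragment is not compilable on its own): the
raw sort_keys/keys members are gone, and filesort code now goes through the
accessors.

    void use_sort(TABLE *table, Sort_param *param, uint count)
    {
      uchar **keys= table->sort.get_sort_keys(); /* was: table->sort.sort_keys */
      table->sort.sort_buffer(param, count);     /* delegated to Filesort_buffer */
      (void) keys;
      table->sort.free_sort_buffer();            /* release when done */
    }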
-#define clear_timestamp_auto_bits(_target_, _bits_) \
- (_target_)= (enum timestamp_auto_set_type)((int)(_target_) & ~(int)(_bits_))
-class Field_timestamp;
+
class Field_blob;
class Table_triggers_list;
@@ -547,6 +563,21 @@ typedef I_P_List <Wait_for_flush,
/**
+ Control block to access table statistics loaded
+ from persistent statistical tables
+*/
+
+struct TABLE_STATISTICS_CB
+{
+ MEM_ROOT mem_root; /* MEM_ROOT to allocate statistical data for the table */
+ Table_statistics *table_stats; /* Structure to access the statistical data */
+ bool stats_can_be_read; /* Memory for statistical data is allocated */
+ bool stats_is_read; /* Statistical data for table has been read
+ from statistical tables */
+};
+
+
+/**
This structure is shared between different table objects. There is one
instance of table share per one table in the database.
*/
@@ -580,10 +611,11 @@ struct TABLE_SHARE
/* The following is copied to each TABLE on OPEN */
Field **field;
Field **found_next_number_field;
- Field *timestamp_field; /* Used only during open */
KEY *key_info; /* data of keys in database */
uint *blob_field; /* Index to blobs in Field arrray*/
+ TABLE_STATISTICS_CB stats_cb;
+
uchar *default_values; /* row with default values */
LEX_STRING comment; /* Comment about table */
CHARSET_INFO *table_charset; /* Default charset of string fields */
@@ -652,7 +684,6 @@ struct TABLE_SHARE
uint uniques; /* Number of UNIQUE index */
uint null_fields; /* number of null fields */
uint blob_fields; /* number of blob fields */
- uint timestamp_field_offset; /* Field number for timestamp field */
uint varchar_fields; /* number of varchar fields */
uint db_create_options; /* Create options from database */
uint db_options_in_use; /* Options in use */
@@ -667,6 +698,7 @@ struct TABLE_SHARE
uint column_bitmap_size;
uchar frm_version;
uint vfields; /* Number of computed (virtual) fields */
+ uint default_fields; /* Number of default fields */
bool use_ext_keys; /* Extended keys can be used */
bool null_field_first;
bool system; /* Set if system table (one record) */
@@ -979,8 +1011,9 @@ public:
Field *next_number_field; /* Set if next_number is activated */
Field *found_next_number_field; /* Set on open */
- Field_timestamp *timestamp_field;
Field **vfield; /* Pointer to virtual fields*/
+ /* Fields that are updated automatically on INSERT or UPDATE. */
+ Field **default_field;
/* Table's triggers, 0 if there are no of them */
Table_triggers_list *triggers;
@@ -1014,6 +1047,15 @@ public:
*/
query_id_t query_id;
+ /*
+ This structure is used for statistical data on the table that
+ is collected by the function collect_statistics_for_table
+ */
+ Table_statistics *collected_stats;
+
+ /* The estimate of the number of records in the table used by optimizer */
+ ha_rows used_stat_records;
+
/*
For each key that has quick_keys.is_set(key) == TRUE: estimate of #records
and max #key parts that range access would use.
@@ -1036,19 +1078,6 @@ public:
*/
ha_rows quick_condition_rows;
- /*
- If this table has TIMESTAMP field with auto-set property (pointed by
- timestamp_field member) then this variable indicates during which
- operations (insert only/on update/in both cases) we should set this
- field to current timestamp. If there are no such field in this table
- or we should not automatically set its value during execution of current
- statement then the variable contains TIMESTAMP_NO_AUTO_SET (i.e. 0).
-
- Value of this variable is set for each statement in open_table() and
- if needed cleared later in statement processing code (see mysql_update()
- as example).
- */
- timestamp_auto_set_type timestamp_field_type;
table_map map; /* ID bit of table (1,2,4,8,16...) */
uint lock_position; /* Position in MYSQL_LOCK.table */
@@ -1118,7 +1147,12 @@ public:
See TABLE_LIST::process_index_hints().
*/
bool force_index_group;
- bool distinct,const_table,no_rows, used_for_duplicate_elimination;
+ /*
+ TRUE <=> this table was created with a
+ create_tmp_table(... distinct=TRUE ...) call
+ */
+ bool distinct;
+ bool const_table,no_rows, used_for_duplicate_elimination;
/**
If set, the optimizer has found that row retrieval should access index
@@ -1148,7 +1182,7 @@ public:
REGINFO reginfo; /* field connections */
MEM_ROOT mem_root;
GRANT_INFO grant;
- FILESORT_INFO sort;
+ Filesort_info sort;
/*
The arena which the items for expressions from the table definition
are associated with.
@@ -1162,6 +1196,7 @@ public:
bool no_partitions_used; /* If true, all partitions have been pruned away */
#endif
uint max_keys; /* Size of allocated key_info array. */
+ bool stats_is_read; /* Persistent statistics have been read for the table */
MDL_ticket *mdl_ticket;
void init(THD *thd, TABLE_LIST *tl);
@@ -1179,6 +1214,8 @@ public:
void mark_columns_needed_for_insert(void);
bool mark_virtual_col(Field *field);
void mark_virtual_columns_for_write(bool insert_fl);
+ void mark_default_fields_for_write();
+ bool has_default_function(bool is_update);
inline void column_bitmaps_set(MY_BITMAP *read_set_arg,
MY_BITMAP *write_set_arg)
{
@@ -1265,6 +1302,8 @@ public:
bool update_const_key_parts(COND *conds);
uint actual_n_key_parts(KEY *keyinfo);
ulong actual_key_flags(KEY *keyinfo);
+ int update_default_fields();
+ inline ha_rows stat_records() { return used_stat_records; }
};
diff --git a/sql/thr_malloc.cc b/sql/thr_malloc.cc
index cedcbefc26f..8c7db0673ac 100644
--- a/sql/thr_malloc.cc
+++ b/sql/thr_malloc.cc
@@ -61,9 +61,10 @@ extern "C" {
}
}
-void init_sql_alloc(MEM_ROOT *mem_root, uint block_size, uint pre_alloc)
+void init_sql_alloc(MEM_ROOT *mem_root, uint block_size, uint pre_alloc,
+ myf my_flags)
{
- init_alloc_root(mem_root, block_size, pre_alloc);
+ init_alloc_root(mem_root, block_size, pre_alloc, my_flags);
mem_root->error_handler=sql_alloc_error_handler;
}
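The extra myf argument threads allocation flags down to init_alloc_root().
A short sketch of the two call patterns used throughout this patch:

    MEM_ROOT connection_root; /* per-connection: charge memory to the thread */
    init_sql_alloc(&connection_root, TABLE_ALLOC_BLOCK_SIZE, 0,
                   MYF(MY_THREAD_SPECIFIC));

    MEM_ROOT shared_root;     /* shared/global data: no thread accounting */
    init_sql_alloc(&shared_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(0));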
diff --git a/sql/thr_malloc.h b/sql/thr_malloc.h
index 81b7d3cc238..0b17c5cdaf1 100644
--- a/sql/thr_malloc.h
+++ b/sql/thr_malloc.h
@@ -20,7 +20,8 @@
typedef struct st_mem_root MEM_ROOT;
-void init_sql_alloc(MEM_ROOT *root, uint block_size, uint pre_alloc_size);
+void init_sql_alloc(MEM_ROOT *root, uint block_size, uint pre_alloc_size,
+ myf my_flags);
void *sql_alloc(size_t);
void *sql_calloc(size_t);
char *sql_strdup(const char *str);
diff --git a/sql/tztime.cc b/sql/tztime.cc
index 8194367a7f9..b16cc65d6bb 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -1637,7 +1637,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
my_hash_free(&tz_names);
goto end;
}
- init_sql_alloc(&tz_storage, 32 * 1024, 0);
+ init_sql_alloc(&tz_storage, 32 * 1024, 0, MYF(0));
mysql_mutex_init(key_tz_LOCK, &tz_LOCK, MY_MUTEX_INIT_FAST);
tz_inited= 1;
@@ -1800,7 +1800,7 @@ end:
else
{
/* Remember that we don't have a THD */
- my_pthread_setspecific_ptr(THR_THD, 0);
+ set_current_thd(0);
my_pthread_setspecific_ptr(THR_MALLOC, 0);
}
@@ -2537,7 +2537,7 @@ scan_tz_dir(char * name_end)
}
else if (MY_S_ISREG(cur_dir->dir_entry[i].mystat->st_mode))
{
- init_alloc_root(&tz_storage, 32768, 0);
+ init_alloc_root(&tz_storage, 32768, 0, MYF(MY_THREAD_SPECIFIC));
if (!tz_load(fullname, &tz_info, &tz_storage))
print_tz_as_sql(root_name_end + 1, &tz_info);
else
@@ -2595,7 +2595,7 @@ main(int argc, char **argv)
}
else
{
- init_alloc_root(&tz_storage, 32768, 0);
+ init_alloc_root(&tz_storage, 32768, 0, MYF(0));
if (strcmp(argv[1], "--leap") == 0)
{
@@ -2672,7 +2672,7 @@ main(int argc, char **argv)
MY_INIT(argv[0]);
- init_alloc_root(&tz_storage, 32768, 0);
+ init_alloc_root(&tz_storage, 32768, 0, MYF(0));
/* let us set some well known timezone */
setenv("TZ", "MET", 1);
diff --git a/sql/uniques.cc b/sql/uniques.cc
index 72411be5cd6..9fa06311ece 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -87,10 +87,11 @@ Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
if (min_dupl_count_arg)
full_size+= sizeof(element_count);
my_b_clear(&file);
- init_tree(&tree, (ulong) (max_in_memory_size / 16), 0, size, comp_func, 0,
- NULL, comp_func_fixed_arg);
+ init_tree(&tree, (ulong) (max_in_memory_size / 16), 0, size, comp_func,
+ NULL, comp_func_fixed_arg, MYF(MY_THREAD_SPECIFIC));
/* If the following fail's the next add will also fail */
- my_init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16);
+ my_init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16,
+ MYF(MY_THREAD_SPECIFIC));
/*
If you change the following, change it in get_max_elements function, too.
*/
@@ -608,9 +609,9 @@ bool Unique::walk(TABLE *table, tree_walk_action action, void *walk_action_arg)
if (flush_io_cache(&file) || reinit_io_cache(&file, READ_CACHE, 0L, 0, 0))
return 1;
ulong buff_sz= (max_in_memory_size / full_size + 1) * full_size;
- if (!(merge_buffer= (uchar *) my_malloc((ulong) buff_sz, MYF(0))))
+ if (!(merge_buffer= (uchar *) my_malloc(buff_sz, MYF(MY_THREAD_SPECIFIC))))
return 1;
- if (buff_sz < (ulong) (full_size * (file_ptrs.elements + 1)))
+ if (buff_sz < full_size * (file_ptrs.elements + 1UL))
res= merge(table, merge_buffer, buff_sz >= full_size * MERGEBUFF2) ;
if (!res)
@@ -644,7 +645,6 @@ bool Unique::walk(TABLE *table, tree_walk_action action, void *walk_action_arg)
bool Unique::merge(TABLE *table, uchar *buff, bool without_last_merge)
{
- SORTPARAM sort_param;
IO_CACHE *outfile= table->sort.io_cache;
BUFFPEK *file_ptr= (BUFFPEK*) file_ptrs.buffer;
uint maxbuffer= file_ptrs.elements - 1;
@@ -654,7 +654,7 @@ bool Unique::merge(TABLE *table, uchar *buff, bool without_last_merge)
/* Open cached file if it isn't open */
if (!outfile)
outfile= table->sort.io_cache= (IO_CACHE*) my_malloc(sizeof(IO_CACHE),
- MYF(MY_ZEROFILL));
+ MYF(MY_THREAD_SPECIFIC|MY_ZEROFILL));
if (!outfile ||
(! my_b_inited(outfile) &&
open_cached_file(outfile,mysql_tmpdir,TEMP_PREFIX,READ_RECORD_BUFFER,
@@ -662,6 +662,7 @@ bool Unique::merge(TABLE *table, uchar *buff, bool without_last_merge)
return 1;
reinit_io_cache(outfile,WRITE_CACHE,0L,0,0);
+ Sort_param sort_param;
bzero((char*) &sort_param,sizeof(sort_param));
sort_param.max_rows= elements;
sort_param.sort_form= table;
@@ -669,10 +670,12 @@ bool Unique::merge(TABLE *table, uchar *buff, bool without_last_merge)
full_size;
sort_param.min_dupl_count= min_dupl_count;
sort_param.res_length= 0;
- sort_param.keys= (uint) (max_in_memory_size / sort_param.sort_length);
+ sort_param.max_keys_per_buffer=
+ (uint) (max_in_memory_size / sort_param.sort_length);
sort_param.not_killable= 1;
- sort_param.unique_buff= buff + (sort_param.keys * sort_param.sort_length);
+ sort_param.unique_buff= buff + (sort_param.max_keys_per_buffer *
+ sort_param.sort_length);
sort_param.compare= (qsort2_cmp) buffpek_compare;
sort_param.cmp_context.key_compare= tree.compare;
@@ -722,7 +725,7 @@ bool Unique::get(TABLE *table)
{
/* Whole tree is in memory; Don't use disk if you don't need to */
if ((record_pointers=table->sort.record_pointers= (uchar*)
- my_malloc(size * tree.elements_in_tree, MYF(0))))
+ my_malloc(size * tree.elements_in_tree, MYF(MY_THREAD_SPECIFIC))))
{
tree_walk_action action= min_dupl_count ?
(tree_walk_action) unique_intersect_write_to_ptrs :
@@ -739,7 +742,7 @@ bool Unique::get(TABLE *table)
return 1;
ulong buff_sz= (max_in_memory_size / full_size + 1) * full_size;
- if (!(sort_buffer= (uchar*) my_malloc(buff_sz, MYF(0))))
+ if (!(sort_buffer= (uchar*) my_malloc(buff_sz, MYF(MY_THREAD_SPECIFIC))))
return 1;
if (merge(table, sort_buffer, FALSE))
diff --git a/sql/unireg.cc b/sql/unireg.cc
index edcfe9eb934..20d101be1a3 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -282,7 +282,7 @@ bool mysql_create_frm(THD *thd, const char *file_name,
}
key_buff_length= uint4korr(fileinfo+47);
- keybuff=(uchar*) my_malloc(key_buff_length, MYF(0));
+ keybuff=(uchar*) my_malloc(key_buff_length, MYF(MY_THREAD_SPECIFIC));
key_info_length= pack_keys(keybuff, keys, key_info, data_offset);
/*
@@ -534,7 +534,7 @@ static uchar *pack_screens(List<Create_field> &create_fields,
while ((field=it++))
length+=(uint) strlen(field->field_name)+1+TE_INFO_LENGTH+cols/2;
- if (!(info=(uchar*) my_malloc(length,MYF(MY_WME))))
+ if (!(info=(uchar*) my_malloc(length,MYF(MY_WME | MY_THREAD_SPECIFIC))))
DBUG_RETURN(0);
start_screen=0;
@@ -1108,7 +1108,9 @@ static bool make_empty_rec(THD *thd, File file,enum legacy_db_type table_type,
bzero((char*) &share, sizeof(share));
table.s= &share;
- if (!(buff=(uchar*) my_malloc((size_t) reclength,MYF(MY_WME | MY_ZEROFILL))))
+ if (!(buff=(uchar*) my_malloc((size_t) reclength,
+ MYF(MY_WME | MY_ZEROFILL |
+ MY_THREAD_SPECIFIC))))
{
DBUG_RETURN(1);
}
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index e705d1dd615..5d6253a2a9f 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -905,8 +905,6 @@ int ha_archive::write_row(uchar *buf)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
ha_statistic_increment(&SSV::ha_write_count);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
mysql_mutex_lock(&share->mutex);
if (!share->archive_write_open)
@@ -1671,7 +1669,7 @@ int ha_archive::info(uint flag)
turn will keep selects from causing a sync to occur.
Basically, yet another optimizations to keep compression working well.
*/
-void ha_archive::start_bulk_insert(ha_rows rows)
+void ha_archive::start_bulk_insert(ha_rows rows, uint flags)
{
DBUG_ENTER("ha_archive::start_bulk_insert");
if (!rows || rows >= ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT)
diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h
index 165c7443070..627267c7306 100644
--- a/storage/archive/ha_archive.h
+++ b/storage/archive/ha_archive.h
@@ -82,7 +82,6 @@ public:
~ha_archive()
{
}
- const char *table_type() const { return "ARCHIVE"; }
const char *index_type(uint inx) { return "NONE"; }
const char **bas_ext() const;
ulonglong table_flags() const
@@ -137,7 +136,7 @@ public:
int optimize(THD* thd, HA_CHECK_OPT* check_opt);
int repair(THD* thd, HA_CHECK_OPT* check_opt);
int check_for_upgrade(HA_CHECK_OPT *check_opt);
- void start_bulk_insert(ha_rows rows);
+ void start_bulk_insert(ha_rows rows, uint flags);
int end_bulk_insert();
enum row_type get_row_type() const
{
diff --git a/storage/blackhole/ha_blackhole.h b/storage/blackhole/ha_blackhole.h
index a7efd261ddb..51857f3bb2a 100644
--- a/storage/blackhole/ha_blackhole.h
+++ b/storage/blackhole/ha_blackhole.h
@@ -46,8 +46,6 @@ public:
~ha_blackhole()
{
}
- /* The name that will be used for display purposes */
- const char *table_type() const { return "BLACKHOLE"; }
/*
The name of the index type that will be used for display
don't implement this method unless you really have indexes
diff --git a/storage/cassandra/CMakeLists.txt b/storage/cassandra/CMakeLists.txt
new file mode 100644
index 00000000000..2fca1803d3c
--- /dev/null
+++ b/storage/cassandra/CMakeLists.txt
@@ -0,0 +1,49 @@
+# Use the first path that contains Thrift.h, if one is found
+
+FIND_PATH(Thrift_INCLUDE_DIRS Thrift.h PATHS
+$ENV{THRIFT_INCLUDE} # environment variable to be used optionally
+${Thrift_INCLUDE_DIR} # this may be set
+/usr/local/include/thrift # list of additional directories to look from
+/opt/local/include/thrift
+/usr/include/thrift
+/opt/include/thrift
+)
+
+# Verify that the Thrift link library can be found
+FIND_LIBRARY(Thrift_LIBS NAMES thrift PATHS ${Thrift_LIB_PATHS} ${Thrift_LIB})
+IF(EXISTS ${Thrift_LIBS})
+ GET_FILENAME_COMPONENT(LINK_DIR ${Thrift_LIBS} PATH ABSOLUTE)
+ELSE()
+ RETURN()
+ENDIF()
+
+INCLUDE_DIRECTORIES(AFTER ${Thrift_INCLUDE_DIRS})
+SET(CMAKE_REQUIRED_INCLUDES ${Thrift_INCLUDE_DIRS})
+
+CHECK_CXX_SOURCE_COMPILES(
+"
+#include <Thrift.h>
+int main() { return 0; }
+" CASSANDRASE_OK)
+
+IF(CASSANDRASE_OK)
+ SET(cassandra_sources
+ ha_cassandra.cc
+ ha_cassandra.h
+ cassandra_se.h
+ cassandra_se.cc
+ gen-cpp/Cassandra.cpp
+ gen-cpp/cassandra_types.h
+ gen-cpp/cassandra_types.cpp
+ gen-cpp/cassandra_constants.h
+ gen-cpp/cassandra_constants.cpp
+ gen-cpp/Cassandra.h)
+
+ STRING(REPLACE "-fno-exceptions" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
+ STRING(REPLACE "-fno-implicit-templates" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
+ LINK_DIRECTORIES(${LINK_DIR})
+
+ MYSQL_ADD_PLUGIN(cassandra ${cassandra_sources} STORAGE_ENGINE MODULE_ONLY LINK_LIBRARIES thrift COMPONENT CassandraSE)
+ INSTALL(FILES cassandra.cnf DESTINATION ${INSTALL_SYSCONFDIR}/my.cnf.d
+ COMPONENT CassandraSE)
+ENDIF(CASSANDRASE_OK)
diff --git a/storage/cassandra/cassandra.cnf b/storage/cassandra/cassandra.cnf
new file mode 100644
index 00000000000..8f4b3d6f91e
--- /dev/null
+++ b/storage/cassandra/cassandra.cnf
@@ -0,0 +1,2 @@
+[mariadb]
+plugin-load-add=ha_cassandra.so
diff --git a/storage/cassandra/cassandra_se.cc b/storage/cassandra/cassandra_se.cc
new file mode 100644
index 00000000000..0d62c5af7a6
--- /dev/null
+++ b/storage/cassandra/cassandra_se.cc
@@ -0,0 +1,800 @@
+
+// Cassandra includes:
+#include <inttypes.h>
+#include <netinet/in.h>
+#include <sys/time.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include "Thrift.h"
+#include "transport/TSocket.h"
+#include "transport/TTransport.h"
+#include "transport/TBufferTransports.h"
+#include "protocol/TProtocol.h"
+#include "protocol/TBinaryProtocol.h"
+#include "gen-cpp/Cassandra.h"
+// cassandra includes end
+
+#include "cassandra_se.h"
+
+struct st_mysql_lex_string
+{
+ char *str;
+ size_t length;
+};
+
+using namespace std;
+using namespace apache::thrift;
+using namespace apache::thrift::transport;
+using namespace apache::thrift::protocol;
+using namespace org::apache::cassandra;
+
+
+/*
+ Implementation of connection to one Cassandra column family (ie., table)
+*/
+class Cassandra_se_impl: public Cassandra_se_interface
+{
+ CassandraClient *cass; /* Connection to cassandra */
+
+ std::string column_family;
+ std::string keyspace;
+
+ ConsistencyLevel::type write_consistency;
+ ConsistencyLevel::type read_consistency;
+
+ /* How many times to retry an operation before giving up */
+ int thrift_call_retries_to_do;
+
+
+ /* DDL data */
+ KsDef ks_def; /* KeySpace we're using (TODO: put this in table->share) */
+ CfDef cf_def; /* Column family we're using (TODO: put in table->share)*/
+ std::vector<ColumnDef>::iterator column_ddl_it;
+
+ /* The list that was returned by the last key lookup */
+ std::vector<ColumnOrSuperColumn> column_data_vec;
+ std::vector<ColumnOrSuperColumn>::iterator column_data_it;
+
+ /* Insert preparation */
+ typedef std::map<std::string, std::vector<Mutation> > ColumnFamilyToMutation;
+ typedef std::map<std::string, ColumnFamilyToMutation> KeyToCfMutationMap;
+
+ KeyToCfMutationMap batch_mutation; /* Prepare operation here */
+ int64_t insert_timestamp;
+ std::vector<Mutation>* insert_list;
+
+ /* Resultset we're reading */
+ std::vector<KeySlice> key_slice_vec;
+ std::vector<KeySlice>::iterator key_slice_it;
+
+ std::string rowkey; /* key of the record we're returning now */
+
+ SlicePredicate slice_pred;
+ SliceRange slice_pred_sr;
+ bool get_slices_returned_less;
+ bool get_slice_found_rows;
+public:
+ Cassandra_se_impl() : cass(NULL),
+ write_consistency(ConsistencyLevel::ONE),
+ read_consistency(ConsistencyLevel::ONE),
+ thrift_call_retries_to_do(0) {}
+ virtual ~Cassandra_se_impl(){ delete cass; }
+
+ /* Connection and DDL checks */
+ bool connect(const char *host, int port, const char *keyspace);
+ void set_column_family(const char *cfname) { column_family.assign(cfname); }
+
+ bool setup_ddl_checks();
+ void first_ddl_column();
+ bool next_ddl_column(char **name, int *name_len, char **value, int *value_len);
+ void get_rowkey_type(char **name, char **type);
+ size_t get_ddl_size();
+ const char* get_default_validator();
+
+ /* Settings */
+ void set_consistency_levels(ulong read_cons_level, ulong write_cons_level);
+
+ /* Writes */
+ void clear_insert_buffer();
+ void start_row_insert(const char *key, int key_len);
+ void add_insert_column(const char *name, int name_len,
+ const char *value, int value_len);
+ void add_insert_delete_column(const char *name, int name_len);
+ void add_row_deletion(const char *key, int key_len,
+ Column_name_enumerator *col_names,
+ LEX_STRING *names, uint nnames);
+
+ bool do_insert();
+
+ /* Reads, point lookups */
+ bool get_slice(char *key, size_t key_len, bool *found);
+ bool get_next_read_column(char **name, int *name_len,
+ char **value, int *value_len );
+ void get_read_rowkey(char **value, int *value_len);
+
+ /* Reads, multi-row scans */
+private:
+ bool have_rowkey_to_skip;
+ std::string rowkey_to_skip;
+
+ bool get_range_slices_param_last_key_as_start_key;
+public:
+ bool get_range_slices(bool last_key_as_start_key);
+ void finish_reading_range_slices();
+ bool get_next_range_slice_row(bool *eof);
+
+ /* Setup that's necessary before a multi-row read. (todo: use it before point lookups, too) */
+ void clear_read_columns();
+ void clear_read_all_columns();
+ void add_read_column(const char *name);
+
+ /* Reads, MRR scans */
+ void new_lookup_keys();
+ int add_lookup_key(const char *key, size_t key_len);
+ bool multiget_slice();
+
+ bool get_next_multiget_row();
+
+ bool truncate();
+
+ bool remove_row();
+
+private:
+ bool retryable_truncate();
+ bool retryable_do_insert();
+ bool retryable_remove_row();
+ bool retryable_setup_ddl_checks();
+ bool retryable_multiget_slice();
+ bool retryable_get_range_slices();
+ bool retryable_get_slice();
+
+ std::vector<std::string> mrr_keys; /* can we use allocator to put these into MRR buffer? */
+ std::map<std::string, std::vector<ColumnOrSuperColumn> > mrr_result;
+ std::map<std::string, std::vector<ColumnOrSuperColumn> >::iterator mrr_result_it;
+
+ /* Non-inherited utility functions: */
+ int64_t get_i64_timestamp();
+
+ typedef bool (Cassandra_se_impl::*retryable_func_t)();
+ bool try_operation(retryable_func_t func);
+};
+
+
+/////////////////////////////////////////////////////////////////////////////
+// Connection and setup
+/////////////////////////////////////////////////////////////////////////////
+Cassandra_se_interface *create_cassandra_se()
+{
+ return new Cassandra_se_impl;
+}
+
+
+bool Cassandra_se_impl::connect(const char *host, int port, const char *keyspace_arg)
+{
+ bool res= true;
+
+ keyspace.assign(keyspace_arg);
+
+ try {
+ boost::shared_ptr<TTransport> socket =
+ boost::shared_ptr<TSocket>(new TSocket(host, port));
+ boost::shared_ptr<TTransport> tr =
+ boost::shared_ptr<TFramedTransport>(new TFramedTransport (socket));
+ boost::shared_ptr<TProtocol> p =
+ boost::shared_ptr<TBinaryProtocol>(new TBinaryProtocol(tr));
+
+ cass= new CassandraClient(p);
+ tr->open();
+ cass->set_keyspace(keyspace_arg);
+
+ res= false; // success
+ }catch(TTransportException te){
+ print_error("%s [%d]", te.what(), te.getType());
+ }catch(InvalidRequestException ire){
+ print_error("%s [%s]", ire.what(), ire.why.c_str());
+ }catch(NotFoundException nfe){
+ print_error("%s", nfe.what());
+ }catch(TException e){
+ print_error("Thrift exception: %s", e.what());
+ }catch (...) {
+ print_error("Unknown exception");
+ }
+
+ if (!res && setup_ddl_checks())
+ res= true;
+ return res;
+}
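A hypothetical caller, using only the interface this patch adds (host, port
and keyspace are example values; 9160 is Cassandra's customary Thrift port).
Note the column family must be set before connect(), since connect() finishes
with the DDL checks:

    #include <cstdio>

    Cassandra_se_interface *se= create_cassandra_se();

    void example_connect()
    {
      se->set_column_family("cf1");
      if (se->connect("127.0.0.1", 9160, "ks1")) /* true means failure */
        fprintf(stderr, "cassandra: %s\n", se->error_str());
    }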
+
+
+void Cassandra_se_impl::set_consistency_levels(ulong read_cons_level,
+ ulong write_cons_level)
+{
+ write_consistency= (ConsistencyLevel::type)(write_cons_level + 1);
+ read_consistency= (ConsistencyLevel::type)(read_cons_level + 1);
+}
+
+
+bool Cassandra_se_impl::retryable_setup_ddl_checks()
+{
+ try {
+
+ cass->describe_keyspace(ks_def, keyspace);
+
+ } catch (NotFoundException nfe) {
+ print_error("keyspace `%s` not found: %s", keyspace.c_str(), nfe.what());
+ return true;
+ }
+
+ std::vector<CfDef>::iterator it;
+ for (it= ks_def.cf_defs.begin(); it < ks_def.cf_defs.end(); it++)
+ {
+ cf_def= *it;
+ if (!cf_def.name.compare(column_family))
+ return false;
+ }
+
+ print_error("Column family %s not found in keyspace %s",
+ column_family.c_str(),
+ keyspace.c_str());
+ return true;
+}
+
+bool Cassandra_se_impl::setup_ddl_checks()
+{
+ return try_operation(&Cassandra_se_impl::retryable_setup_ddl_checks);
+}
+
+
+void Cassandra_se_impl::first_ddl_column()
+{
+ column_ddl_it= cf_def.column_metadata.begin();
+}
+
+
+bool Cassandra_se_impl::next_ddl_column(char **name, int *name_len,
+ char **type, int *type_len)
+{
+ if (column_ddl_it == cf_def.column_metadata.end())
+ return true;
+
+ *name= (char*)(*column_ddl_it).name.c_str();
+ *name_len= (*column_ddl_it).name.length();
+
+ *type= (char*)(*column_ddl_it).validation_class.c_str();
+ *type_len= (*column_ddl_it).validation_class.length();
+
+ column_ddl_it++;
+ return false;
+}
+
+
+void Cassandra_se_impl::get_rowkey_type(char **name, char **type)
+{
+ if (cf_def.__isset.key_validation_class)
+ *type= (char*)cf_def.key_validation_class.c_str();
+ else
+ *type= NULL;
+
+ if (cf_def.__isset.key_alias)
+ *name= (char*)cf_def.key_alias.c_str();
+ else
+ *name= NULL;
+}
+
+size_t Cassandra_se_impl::get_ddl_size()
+{
+ return cf_def.column_metadata.size();
+}
+
+const char* Cassandra_se_impl::get_default_validator()
+{
+ return cf_def.default_validation_class.c_str();
+}
+
+
+/////////////////////////////////////////////////////////////////////////////
+// Data writes
+/////////////////////////////////////////////////////////////////////////////
+int64_t Cassandra_se_impl::get_i64_timestamp()
+{
+ struct timeval td;
+ gettimeofday(&td, NULL);
+ int64_t ms = td.tv_sec;
+ ms = ms * 1000;
+ int64_t usec = td.tv_usec;
+ usec = usec / 1000;
+ ms += usec;
+
+ return ms;
+}
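The arithmetic above converts a timeval to whole milliseconds since the
epoch, truncating microseconds; e.g. tv_sec=1, tv_usec=234567 gives
1*1000 + 234 = 1234. An equivalent one-liner:

    int64_t ms= (int64_t) td.tv_sec * 1000 + td.tv_usec / 1000;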
+
+
+void Cassandra_se_impl::clear_insert_buffer()
+{
+ batch_mutation.clear();
+}
+
+
+void Cassandra_se_impl::start_row_insert(const char *key, int key_len)
+{
+ std::string key_to_insert;
+ key_to_insert.assign(key, key_len);
+ batch_mutation[key_to_insert]= ColumnFamilyToMutation();
+ ColumnFamilyToMutation& cf_mut= batch_mutation[key_to_insert];
+
+ cf_mut[column_family]= std::vector<Mutation>();
+ insert_list= &cf_mut[column_family];
+
+ insert_timestamp= get_i64_timestamp();
+}
+
+
+void Cassandra_se_impl::add_row_deletion(const char *key, int key_len,
+ Column_name_enumerator *col_names,
+ LEX_STRING *names, uint nnames)
+{
+ std::string key_to_delete;
+ key_to_delete.assign(key, key_len);
+
+ batch_mutation[key_to_delete]= ColumnFamilyToMutation();
+ ColumnFamilyToMutation& cf_mut= batch_mutation[key_to_delete];
+
+ cf_mut[column_family]= std::vector<Mutation>();
+ std::vector<Mutation> &mutation_list= cf_mut[column_family];
+
+ Mutation mut;
+ mut.__isset.deletion= true;
+ mut.deletion.__isset.timestamp= true;
+ mut.deletion.timestamp= get_i64_timestamp();
+ mut.deletion.__isset.predicate= true;
+
+ /*
+ Attempting to delete columns with a SliceRange causes an exception with the message
+ "Deletion does not yet support SliceRange predicates".
+
+ Delete all columns individually.
+ */
+ SlicePredicate slice_pred;
+ slice_pred.__isset.column_names= true;
+ const char *col_name;
+ while ((col_name= col_names->get_next_name()))
+ slice_pred.column_names.push_back(std::string(col_name));
+ for (uint i= 0; i < nnames; i++)
+ slice_pred.column_names.push_back(std::string(names[i].str,
+ names[i].length));
+
+ mut.deletion.predicate= slice_pred;
+
+ mutation_list.push_back(mut);
+}
+
+
+void Cassandra_se_impl::add_insert_column(const char *name,
+ int name_len,
+ const char *value,
+ int value_len)
+{
+ Mutation mut;
+ mut.__isset.column_or_supercolumn= true;
+ mut.column_or_supercolumn.__isset.column= true;
+
+ Column& col=mut.column_or_supercolumn.column;
+ if (name_len)
+ col.name.assign(name, name_len);
+ else
+ col.name.assign(name);
+ col.value.assign(value, value_len);
+ col.timestamp= insert_timestamp;
+ col.__isset.value= true;
+ col.__isset.timestamp= true;
+ insert_list->push_back(mut);
+}
+
+void Cassandra_se_impl::add_insert_delete_column(const char *name,
+ int name_len)
+{
+ Mutation mut;
+ mut.__isset.deletion= true;
+ mut.deletion.__isset.timestamp= true;
+ mut.deletion.timestamp= insert_timestamp;
+ mut.deletion.__isset.predicate= true;
+
+ SlicePredicate slice_pred;
+ slice_pred.__isset.column_names= true;
+ slice_pred.column_names.push_back(std::string(name, name_len));
+ mut.deletion.predicate= slice_pred;
+
+ insert_list->push_back(mut);
+}
+
+
+bool Cassandra_se_impl::retryable_do_insert()
+{
+ cass->batch_mutate(batch_mutation, write_consistency);
+
+ cassandra_counters.row_inserts+= batch_mutation.size();
+ cassandra_counters.row_insert_batches++;
+
+ clear_insert_buffer();
+ return 0;
+}
+
+
+bool Cassandra_se_impl::do_insert()
+{
+ /*
+ zero-size mutations are allowed by Cassandra's batch_mutate but lets not
+ do them (we may attempt to do it if there is a bulk insert that stores
+ exactly @@cassandra_insert_batch_size*n elements.
+ */
+ if (batch_mutation.empty())
+ return false;
+
+ return try_operation(&Cassandra_se_impl::retryable_do_insert);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////
+// Reading data
+/////////////////////////////////////////////////////////////////////////////
+
+/*
+ Make one key lookup. If the record is found, the result is stored locally and
+ the caller should iterate over it.
+*/
+
+bool Cassandra_se_impl::get_slice(char *key, size_t key_len, bool *found)
+{
+ bool res;
+ rowkey.assign(key, key_len);
+
+ if (!(res= try_operation(&Cassandra_se_impl::retryable_get_slice)))
+ *found= get_slice_found_rows;
+ return res;
+}
+
+
+bool Cassandra_se_impl::retryable_get_slice()
+{
+ ColumnParent cparent;
+ cparent.column_family= column_family;
+
+ SlicePredicate slice_pred;
+ SliceRange sr;
+ sr.start = "";
+ sr.finish = "";
+ slice_pred.__set_slice_range(sr);
+
+ cass->get_slice(column_data_vec, rowkey, cparent, slice_pred,
+ read_consistency);
+
+ if (column_data_vec.size() == 0)
+ {
+ /*
+ No columns found. Cassandra doesn't allow records without any column =>
+ this means the search key doesn't exist
+ */
+ get_slice_found_rows= false;
+ return false;
+ }
+ get_slice_found_rows= true;
+
+ column_data_it= column_data_vec.begin();
+ return false;
+}
+
+
+bool Cassandra_se_impl::get_next_read_column(char **name, int *name_len,
+ char **value, int *value_len)
+{
+ bool use_counter=false;
+ while (1)
+ {
+ if (column_data_it == column_data_vec.end())
+ return true;
+
+ if ((*column_data_it).__isset.column)
+ break; /* OK, it's a real column. This should always be the case. */
+
+ if ((*column_data_it).__isset.counter_column)
+ {
+ use_counter= true;
+ break;
+ }
+
+ column_data_it++;
+ }
+
+ ColumnOrSuperColumn& cs= *column_data_it;
+ if (use_counter)
+ {
+ *name_len= cs.counter_column.name.size();
+ *name= (char*)cs.counter_column.name.c_str();
+ *value= (char*)&cs.counter_column.value;
+ *value_len= sizeof(cs.counter_column.value);
+ }
+ else
+ {
+ *name_len= cs.column.name.size();
+ *name= (char*)cs.column.name.c_str();
+ *value= (char*)cs.column.value.c_str();
+ *value_len= cs.column.value.length();
+ }
+
+ column_data_it++;
+ return false;
+}
+
+
+/* Return the rowkey for the record that was read */
+
+void Cassandra_se_impl::get_read_rowkey(char **value, int *value_len)
+{
+ *value= (char*)rowkey.c_str();
+ *value_len= rowkey.length();
+}
+
+
+bool Cassandra_se_impl::get_range_slices(bool last_key_as_start_key)
+{
+ get_range_slices_param_last_key_as_start_key= last_key_as_start_key;
+
+ return try_operation(&Cassandra_se_impl::retryable_get_range_slices);
+}
+
+
+bool Cassandra_se_impl::retryable_get_range_slices()
+{
+ bool last_key_as_start_key= get_range_slices_param_last_key_as_start_key;
+
+ ColumnParent cparent;
+ cparent.column_family= column_family;
+
+ /* SlicePredicate can be used to limit columns we will retrieve */
+
+ KeyRange key_range;
+ key_range.__isset.start_key= true;
+ key_range.__isset.end_key= true;
+
+ if (last_key_as_start_key)
+ {
+ key_range.start_key= rowkey;
+
+ have_rowkey_to_skip= true;
+ rowkey_to_skip= rowkey;
+ }
+ else
+ {
+ have_rowkey_to_skip= false;
+ key_range.start_key.assign("", 0);
+ }
+
+ key_range.end_key.assign("", 0);
+ key_range.count= read_batch_size;
+
+ cass->get_range_slices(key_slice_vec, cparent, slice_pred, key_range,
+ read_consistency);
+
+ get_slices_returned_less= (key_slice_vec.size() < (uint)read_batch_size);
+
+ key_slice_it= key_slice_vec.begin();
+ return false;
+}
+
+
+/* Switch to the next row. This may produce an error */
+bool Cassandra_se_impl::get_next_range_slice_row(bool *eof)
+{
+restart:
+ if (key_slice_it == key_slice_vec.end())
+ {
+ if (get_slices_returned_less)
+ {
+ *eof= true;
+ return false;
+ }
+
+ /*
+ We have read through all columns in this batch. Try getting the next
+ batch.
+ */
+ if (get_range_slices(true))
+ return true;
+
+ if (key_slice_vec.empty())
+ {
+ *eof= true;
+ return false;
+ }
+ }
+
+ /*
+ (1) - Skip the last row that we read in the previous batch.
+ (2) - Rows that were deleted show up as rows without any columns. Skip
+ them, like CQL does.
+ */
+ if ((have_rowkey_to_skip && !rowkey_to_skip.compare(key_slice_it->key)) || // (1)
+ key_slice_it->columns.size() == 0) // (2)
+ {
+ key_slice_it++;
+ goto restart;
+ }
+
+ *eof= false;
+ column_data_vec= key_slice_it->columns;
+ rowkey= key_slice_it->key;
+ column_data_it= column_data_vec.begin();
+ key_slice_it++;
+ return false;
+}
+
+
+void Cassandra_se_impl::finish_reading_range_slices()
+{
+ key_slice_vec.clear();
+}
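A hypothetical driver loop for the batched scan above, assuming se is a
connected Cassandra_se_interface* (see the earlier connect() sketch): the
first call passes false to scan from the start, and later batches are fetched
inside get_next_range_slice_row() with the last key as start key.

    bool eof;
    if (!se->get_range_slices(false))
    {
      while (!se->get_next_range_slice_row(&eof) && !eof)
      {
        /* current row: get_read_rowkey() / get_next_read_column() */
      }
      se->finish_reading_range_slices();
    }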
+
+
+void Cassandra_se_impl::clear_read_columns()
+{
+ slice_pred.column_names.clear();
+}
+
+void Cassandra_se_impl::clear_read_all_columns()
+{
+ slice_pred_sr.start = "";
+ slice_pred_sr.finish = "";
+ slice_pred.__set_slice_range(slice_pred_sr);
+}
+
+
+void Cassandra_se_impl::add_read_column(const char *name_arg)
+{
+ std::string name(name_arg);
+ slice_pred.__isset.column_names= true;
+ slice_pred.column_names.push_back(name);
+}
+
+
+bool Cassandra_se_impl::truncate()
+{
+ return try_operation(&Cassandra_se_impl::retryable_truncate);
+}
+
+
+bool Cassandra_se_impl::retryable_truncate()
+{
+ cass->truncate(column_family);
+ return 0;
+}
+
+
+bool Cassandra_se_impl::remove_row()
+{
+ return try_operation(&Cassandra_se_impl::retryable_remove_row);
+}
+
+
+bool Cassandra_se_impl::retryable_remove_row()
+{
+ ColumnPath column_path;
+ column_path.column_family= column_family;
+ cass->remove(rowkey, column_path, get_i64_timestamp(), write_consistency);
+ return 0;
+}
+
+/*
+ Try calling a function, catching possible Cassandra errors, and re-trying
+ for "transient" errors.
+*/
+bool Cassandra_se_impl::try_operation(retryable_func_t func_to_call)
+{
+ bool res;
+ int n_retries= thrift_call_retries_to_do;
+
+ do
+ {
+ res= true;
+
+ try {
+
+ if ((res= (this->*func_to_call)()))
+ {
+ /*
+ The function call was made successfully (without timeouts, etc),
+ but something inside it returned 'true'.
+ This is supposedly a failure (or "not found" or other negative
+ result). We need to return this to the caller.
+ */
+ n_retries= 0;
+ }
+
+ } catch (InvalidRequestException ire) {
+ n_retries= 0; /* there is no point in retrying this operation */
+ print_error("%s [%s]", ire.what(), ire.why.c_str());
+ } catch (UnavailableException ue) {
+ cassandra_counters.unavailable_exceptions++;
+ if (!--n_retries)
+ print_error("UnavailableException: %s", ue.what());
+ } catch (TimedOutException te) {
+ cassandra_counters.timeout_exceptions++;
+ if (!--n_retries)
+ print_error("TimedOutException: %s", te.what());
+ }catch(TException e){
+ /* todo: we may use retry for certain kinds of Thrift errors */
+ n_retries= 0;
+ print_error("Thrift exception: %s", e.what());
+ } catch (...) {
+ n_retries= 0; /* Don't retry */
+ print_error("Unknown exception");
+ }
+
+ } while (res && n_retries > 0);
+
+ return res;
+}
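A standalone trace of the retry accounting above (note that the default
thrift_call_retries_to_do is 0, i.e. a single attempt): the error is reported
only once the retry budget is exhausted.

    #include <cstdio>

    static bool flaky_call(int *failures_left)
    {
      return (*failures_left)-- > 0;  /* true == failure, as in try_operation() */
    }

    int main()
    {
      bool res;
      int failures= 2, n_retries= 2;  /* budget, cf. thrift_call_retries_to_do */
      do
      {
        res= flaky_call(&failures);
        if (res && !--n_retries)
          printf("giving up after the last retry\n");
      } while (res && n_retries > 0);
      return res ? 1 : 0;
    }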
+
+/////////////////////////////////////////////////////////////////////////////
+// MRR reads
+/////////////////////////////////////////////////////////////////////////////
+
+void Cassandra_se_impl::new_lookup_keys()
+{
+ mrr_keys.clear();
+}
+
+
+int Cassandra_se_impl::add_lookup_key(const char *key, size_t key_len)
+{
+ mrr_keys.push_back(std::string(key, key_len));
+ return mrr_keys.size();
+}
+
+bool Cassandra_se_impl::multiget_slice()
+{
+ return try_operation(&Cassandra_se_impl::retryable_multiget_slice);
+}
+
+
+bool Cassandra_se_impl::retryable_multiget_slice()
+{
+ ColumnParent cparent;
+ cparent.column_family= column_family;
+
+ SlicePredicate slice_pred;
+ SliceRange sr;
+ sr.start = "";
+ sr.finish = "";
+ slice_pred.__set_slice_range(sr);
+
+ cassandra_counters.multiget_reads++;
+ cassandra_counters.multiget_keys_scanned += mrr_keys.size();
+ cass->multiget_slice(mrr_result, mrr_keys, cparent, slice_pred,
+ read_consistency);
+
+ cassandra_counters.multiget_rows_read += mrr_result.size();
+ mrr_result_it= mrr_result.begin();
+
+ return false;
+}
+
+
+bool Cassandra_se_impl::get_next_multiget_row()
+{
+ if (mrr_result_it == mrr_result.end())
+ return true; /* EOF */
+
+ column_data_vec= mrr_result_it->second;
+ rowkey= mrr_result_it->first;
+
+ column_data_it= column_data_vec.begin();
+ mrr_result_it++;
+ return false;
+}
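A hypothetical multi-key (MRR) lookup, again assuming a connected se pointer;
the keys and lengths are example values:

    se->new_lookup_keys();
    se->add_lookup_key("key1", 4);
    se->add_lookup_key("key2", 4);
    if (!se->multiget_slice())
    {
      while (!se->get_next_multiget_row())
      {
        /* rowkey/columns are current: get_read_rowkey(),
           get_next_read_column() */
      }
    }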
+
+
+
diff --git a/storage/cassandra/cassandra_se.h b/storage/cassandra/cassandra_se.h
new file mode 100644
index 00000000000..050c65e6dde
--- /dev/null
+++ b/storage/cassandra/cassandra_se.h
@@ -0,0 +1,123 @@
+
+/*
+ This file is a "bridge" interface between Cassandra+Thrift and MariaDB.
+
+ It is #included by both sides, so it must not include headers from either
+ side (including both together causes compile errors due to conflicts).
+*/
+
+struct st_mysql_lex_string;
+typedef struct st_mysql_lex_string LEX_STRING;
+
+/* We need to define this here so that ha_cassandra.cc also has access to it */
+typedef enum
+{
+ ONE = 1-1,
+ QUORUM = 2-1,
+ LOCAL_QUORUM = 3-1,
+ EACH_QUORUM = 4-1,
+ ALL = 5-1,
+ ANY = 6-1,
+ TWO = 7-1,
+ THREE = 8-1,
+} enum_cassandra_consistency_level;
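The "n-1" values keep the MariaDB-side enum zero-based (convenient for a
sysvar typelib), while Thrift's generated ConsistencyLevel is one-based in
the Cassandra 0.8 IDL, so converting is just "+ 1", as
set_consistency_levels() in cassandra_se.cc does. A standalone check of that
assumption:

    #include <cassert>

    enum model_level { MODEL_ONE= 1-1, MODEL_QUORUM= 2-1 }; /* mirrors above */

    int main()
    {
      assert(MODEL_ONE + 1 == 1);    /* ConsistencyLevel::ONE == 1 (assumed) */
      assert(MODEL_QUORUM + 1 == 2); /* ConsistencyLevel::QUORUM == 2 (assumed) */
      return 0;
    }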
+
+
+class Column_name_enumerator
+{
+public:
+ virtual const char* get_next_name()=0;
+ virtual ~Column_name_enumerator(){}
+};
+
+/*
+ Interface to one Cassandra column family, i.e. one 'table'
+*/
+class Cassandra_se_interface
+{
+public:
+ Cassandra_se_interface() { err_buffer[0]=0; }
+
+ virtual ~Cassandra_se_interface(){};
+ /* Init */
+ virtual bool connect(const char *host, int port, const char *keyspace)=0;
+ virtual void set_column_family(const char *cfname) = 0;
+
+ /* Settings */
+ virtual void set_consistency_levels(ulong read_cons_level, ulong write_cons_level)=0;
+
+ /* Check underlying DDL */
+ virtual bool setup_ddl_checks()=0;
+ virtual void first_ddl_column()=0;
+ virtual bool next_ddl_column(char **name, int *name_len, char **value,
+ int *value_len)=0;
+ virtual void get_rowkey_type(char **name, char **type)=0;
+ virtual size_t get_ddl_size()=0;
+ virtual const char* get_default_validator()=0;
+
+ /* Writes */
+ virtual void clear_insert_buffer()=0;
+ virtual void add_row_deletion(const char *key, int key_len,
+ Column_name_enumerator *col_names,
+ LEX_STRING *names, uint nnames)=0;
+ virtual void start_row_insert(const char *key, int key_len)=0;
+ virtual void add_insert_delete_column(const char *name, int name_len)= 0;
+ virtual void add_insert_column(const char *name, int name_len,
+ const char *value,
+ int value_len)=0;
+ virtual bool do_insert()=0;
+
+ /* Reads */
+ virtual bool get_slice(char *key, size_t key_len, bool *found)=0 ;
+ virtual bool get_next_read_column(char **name, int *name_len,
+ char **value, int *value_len)=0;
+ virtual void get_read_rowkey(char **value, int *value_len)=0;
+
+ /* Reads, multi-row scans */
+ int read_batch_size;
+ virtual bool get_range_slices(bool last_key_as_start_key)=0;
+ virtual void finish_reading_range_slices()=0;
+ virtual bool get_next_range_slice_row(bool *eof)=0;
+
+ /* Reads, MRR scans */
+ virtual void new_lookup_keys()=0;
+ virtual int add_lookup_key(const char *key, size_t key_len)=0;
+ virtual bool multiget_slice()=0;
+ virtual bool get_next_multiget_row()=0;
+
+ /* read_set setup */
+ virtual void clear_read_columns()=0;
+ virtual void clear_read_all_columns()=0;
+ virtual void add_read_column(const char *name)=0;
+
+ virtual bool truncate()=0;
+ virtual bool remove_row()=0;
+
+ /* Passing error messages up to ha_cassandra */
+ char err_buffer[512];
+ const char *error_str() { return err_buffer; }
+ void print_error(const char *format, ...);
+};
+
+
+/* A structure with global counters */
+class Cassandra_status_vars
+{
+public:
+ ulong row_inserts;
+ ulong row_insert_batches;
+
+ ulong multiget_reads;
+ ulong multiget_keys_scanned;
+ ulong multiget_rows_read;
+
+ ulong timeout_exceptions;
+ ulong unavailable_exceptions;
+};
+
+
+extern Cassandra_status_vars cassandra_counters;
+
+
+Cassandra_se_interface *create_cassandra_se();
+
diff --git a/storage/cassandra/gen-cpp/Cassandra.cpp b/storage/cassandra/gen-cpp/Cassandra.cpp
new file mode 100644
index 00000000000..db1deb34c31
--- /dev/null
+++ b/storage/cassandra/gen-cpp/Cassandra.cpp
@@ -0,0 +1,12871 @@
+/**
+ * Autogenerated by Thrift Compiler (0.8.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+#include <inttypes.h>
+#include <netinet/in.h>
+
+#include "Cassandra.h"
+
+namespace org { namespace apache { namespace cassandra {
+
+uint32_t Cassandra_login_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_auth_request = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->auth_request.read(iprot);
+ isset_auth_request = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_auth_request)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_login_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_login_args");
+ xfer += oprot->writeFieldBegin("auth_request", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->auth_request.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_login_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_login_pargs");
+ xfer += oprot->writeFieldBegin("auth_request", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += (*(this->auth_request)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
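+
+/*
+  The *_pargs variants are the client-side counterparts of *_args: they
+  hold pointers to the caller's values (note the dereferences above), so
+  arguments can be serialized without first being copied into the struct.
+*/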
+
+uint32_t Cassandra_login_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->authnx.read(iprot);
+ this->__isset.authnx = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->authzx.read(iprot);
+ this->__isset.authzx = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_login_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_login_result");
+
+ if (this->__isset.authnx) {
+ xfer += oprot->writeFieldBegin("authnx", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->authnx.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.authzx) {
+ xfer += oprot->writeFieldBegin("authzx", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->authzx.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
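+
+/*
+  Result structs encode a union: the else-if chain writes at most one
+  member, either the success value (field id 0, for non-void calls) or the
+  first exception whose __isset flag is set.
+*/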
+
+uint32_t Cassandra_login_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->authnx.read(iprot);
+ this->__isset.authnx = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->authzx.read(iprot);
+ this->__isset.authzx = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_set_keyspace_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_keyspace = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->keyspace);
+ isset_keyspace = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_keyspace)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_set_keyspace_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_set_keyspace_args");
+ xfer += oprot->writeFieldBegin("keyspace", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->keyspace);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_set_keyspace_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_set_keyspace_pargs");
+ xfer += oprot->writeFieldBegin("keyspace", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString((*(this->keyspace)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_set_keyspace_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_set_keyspace_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_set_keyspace_result");
+
+ if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_set_keyspace_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_get_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_key = false;
+ bool isset_column_path = false;
+ bool isset_consistency_level = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->key);
+ isset_key = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->column_path.read(iprot);
+ isset_column_path = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast149;
+ xfer += iprot->readI32(ecast149);
+ this->consistency_level = (ConsistencyLevel::type)ecast149;
+ isset_consistency_level = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_key)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_column_path)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_consistency_level)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
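+
+/*
+  Enums travel as i32 on the wire, hence the temporary ecast149 read above
+  and the cast to ConsistencyLevel::type; the matching write() below casts
+  back to int32_t.
+*/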
+
+uint32_t Cassandra_get_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_get_args");
+ xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->key);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_path", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->column_path.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 3);
+ xfer += oprot->writeI32((int32_t)this->consistency_level);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_get_pargs");
+ xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary((*(this->key)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_path", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += (*(this->column_path)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 3);
+ xfer += oprot->writeI32((int32_t)(*(this->consistency_level)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->success.read(iprot);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->nfe.read(iprot);
+ this->__isset.nfe = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_get_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_get_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
+ xfer += this->success.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.nfe) {
+ xfer += oprot->writeFieldBegin("nfe", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->nfe.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ue) {
+ xfer += oprot->writeFieldBegin("ue", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->ue.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.te) {
+ xfer += oprot->writeFieldBegin("te", ::apache::thrift::protocol::T_STRUCT, 4);
+ xfer += this->te.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += (*(this->success)).read(iprot);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->nfe.read(iprot);
+ this->__isset.nfe = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
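+
+/*
+  As with pargs, the *_presult variants deserialize directly into
+  caller-owned storage: 'success' is a pointer here, so the reply lands in
+  the output argument (presumably supplied by the corresponding client
+  recv call) without an extra copy.
+*/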
+
+uint32_t Cassandra_get_slice_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_key = false;
+ bool isset_column_parent = false;
+ bool isset_predicate = false;
+ bool isset_consistency_level = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->key);
+ isset_key = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->column_parent.read(iprot);
+ isset_column_parent = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->predicate.read(iprot);
+ isset_predicate = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast150;
+ xfer += iprot->readI32(ecast150);
+ this->consistency_level = (ConsistencyLevel::type)ecast150;
+ isset_consistency_level = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_key)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_column_parent)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_predicate)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_consistency_level)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_get_slice_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_get_slice_args");
+ xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->key);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_parent", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->column_parent.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("predicate", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->predicate.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)this->consistency_level);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_slice_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_get_slice_pargs");
+ xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary((*(this->key)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_parent", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += (*(this->column_parent)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("predicate", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += (*(this->predicate)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)(*(this->consistency_level)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_slice_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->success.clear();
+ uint32_t _size151;
+ ::apache::thrift::protocol::TType _etype154;
+ iprot->readListBegin(_etype154, _size151);
+ this->success.resize(_size151);
+ uint32_t _i155;
+ for (_i155 = 0; _i155 < _size151; ++_i155)
+ {
+ xfer += this->success[_i155].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
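+
+/*
+  Container pattern for T_LIST: readListBegin() yields the element type
+  and count, the vector is resized up front, each element is read in
+  place, and readListEnd() closes the container. The element type reported
+  by the protocol (_etype154) is not checked against T_STRUCT here; the
+  generated code trusts the declared schema.
+*/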
+
+uint32_t Cassandra_get_slice_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_get_slice_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
+ std::vector<ColumnOrSuperColumn> ::const_iterator _iter156;
+ for (_iter156 = this->success.begin(); _iter156 != this->success.end(); ++_iter156)
+ {
+ xfer += (*_iter156).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ue) {
+ xfer += oprot->writeFieldBegin("ue", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->ue.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.te) {
+ xfer += oprot->writeFieldBegin("te", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->te.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_slice_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ (*(this->success)).clear();
+ uint32_t _size157;
+ ::apache::thrift::protocol::TType _etype160;
+ iprot->readListBegin(_etype160, _size157);
+ (*(this->success)).resize(_size157);
+ uint32_t _i161;
+ for (_i161 = 0; _i161 < _size157; ++_i161)
+ {
+ xfer += (*(this->success))[_i161].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_get_count_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_key = false;
+ bool isset_column_parent = false;
+ bool isset_predicate = false;
+ bool isset_consistency_level = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->key);
+ isset_key = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->column_parent.read(iprot);
+ isset_column_parent = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->predicate.read(iprot);
+ isset_predicate = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast162;
+ xfer += iprot->readI32(ecast162);
+ this->consistency_level = (ConsistencyLevel::type)ecast162;
+ isset_consistency_level = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_key)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_column_parent)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_predicate)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_consistency_level)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_get_count_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_get_count_args");
+ xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->key);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_parent", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->column_parent.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("predicate", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->predicate.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)this->consistency_level);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_count_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_get_count_pargs");
+ xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary((*(this->key)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_parent", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += (*(this->column_parent)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("predicate", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += (*(this->predicate)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)(*(this->consistency_level)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_count_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->success);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_get_count_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_get_count_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_I32, 0);
+ xfer += oprot->writeI32(this->success);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ue) {
+ xfer += oprot->writeFieldBegin("ue", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->ue.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.te) {
+ xfer += oprot->writeFieldBegin("te", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->te.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_count_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32((*(this->success)));
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_multiget_slice_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_keys = false;
+ bool isset_column_parent = false;
+ bool isset_predicate = false;
+ bool isset_consistency_level = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->keys.clear();
+ uint32_t _size163;
+ ::apache::thrift::protocol::TType _etype166;
+ iprot->readListBegin(_etype166, _size163);
+ this->keys.resize(_size163);
+ uint32_t _i167;
+ for (_i167 = 0; _i167 < _size163; ++_i167)
+ {
+ xfer += iprot->readBinary(this->keys[_i167]);
+ }
+ iprot->readListEnd();
+ }
+ isset_keys = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->column_parent.read(iprot);
+ isset_column_parent = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->predicate.read(iprot);
+ isset_predicate = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast168;
+ xfer += iprot->readI32(ecast168);
+ this->consistency_level = (ConsistencyLevel::type)ecast168;
+ isset_consistency_level = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_keys)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_column_parent)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_predicate)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_consistency_level)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_multiget_slice_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_multiget_slice_args");
+ xfer += oprot->writeFieldBegin("keys", ::apache::thrift::protocol::T_LIST, 1);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->keys.size()));
+ std::vector<std::string> ::const_iterator _iter169;
+ for (_iter169 = this->keys.begin(); _iter169 != this->keys.end(); ++_iter169)
+ {
+ xfer += oprot->writeBinary((*_iter169));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_parent", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->column_parent.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("predicate", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->predicate.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)this->consistency_level);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_multiget_slice_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_multiget_slice_pargs");
+ xfer += oprot->writeFieldBegin("keys", ::apache::thrift::protocol::T_LIST, 1);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->keys)).size()));
+ std::vector<std::string> ::const_iterator _iter170;
+ for (_iter170 = (*(this->keys)).begin(); _iter170 != (*(this->keys)).end(); ++_iter170)
+ {
+ xfer += oprot->writeBinary((*_iter170));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_parent", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += (*(this->column_parent)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("predicate", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += (*(this->predicate)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)(*(this->consistency_level)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_multiget_slice_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_MAP) {
+ {
+ this->success.clear();
+ uint32_t _size171;
+ ::apache::thrift::protocol::TType _ktype172;
+ ::apache::thrift::protocol::TType _vtype173;
+ iprot->readMapBegin(_ktype172, _vtype173, _size171);
+ uint32_t _i175;
+ for (_i175 = 0; _i175 < _size171; ++_i175)
+ {
+ std::string _key176;
+ xfer += iprot->readBinary(_key176);
+ std::vector<ColumnOrSuperColumn> & _val177 = this->success[_key176];
+ {
+ _val177.clear();
+ uint32_t _size178;
+ ::apache::thrift::protocol::TType _etype181;
+ iprot->readListBegin(_etype181, _size178);
+ _val177.resize(_size178);
+ uint32_t _i182;
+ for (_i182 = 0; _i182 < _size178; ++_i182)
+ {
+ xfer += _val177[_i182].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ }
+ iprot->readMapEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
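+
+/*
+  Nested containers compose the same way: the T_MAP case above reads a
+  binary key, takes a reference to this->success[key] (default-constructing
+  the vector entry), and then runs the usual T_LIST loop to fill it.
+*/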
+
+uint32_t Cassandra_multiget_slice_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_multiget_slice_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
+ {
+ xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->success.size()));
+ std::map<std::string, std::vector<ColumnOrSuperColumn> > ::const_iterator _iter183;
+ for (_iter183 = this->success.begin(); _iter183 != this->success.end(); ++_iter183)
+ {
+ xfer += oprot->writeBinary(_iter183->first);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(_iter183->second.size()));
+ std::vector<ColumnOrSuperColumn> ::const_iterator _iter184;
+ for (_iter184 = _iter183->second.begin(); _iter184 != _iter183->second.end(); ++_iter184)
+ {
+ xfer += (*_iter184).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ }
+ xfer += oprot->writeMapEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ue) {
+ xfer += oprot->writeFieldBegin("ue", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->ue.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.te) {
+ xfer += oprot->writeFieldBegin("te", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->te.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_multiget_slice_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_MAP) {
+ {
+ (*(this->success)).clear();
+ uint32_t _size185;
+ ::apache::thrift::protocol::TType _ktype186;
+ ::apache::thrift::protocol::TType _vtype187;
+ iprot->readMapBegin(_ktype186, _vtype187, _size185);
+ uint32_t _i189;
+ for (_i189 = 0; _i189 < _size185; ++_i189)
+ {
+ std::string _key190;
+ xfer += iprot->readBinary(_key190);
+ std::vector<ColumnOrSuperColumn> & _val191 = (*(this->success))[_key190];
+ {
+ _val191.clear();
+ uint32_t _size192;
+ ::apache::thrift::protocol::TType _etype195;
+ iprot->readListBegin(_etype195, _size192);
+ _val191.resize(_size192);
+ uint32_t _i196;
+ for (_i196 = 0; _i196 < _size192; ++_i196)
+ {
+ xfer += _val191[_i196].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ }
+ iprot->readMapEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_multiget_count_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_keys = false;
+ bool isset_column_parent = false;
+ bool isset_predicate = false;
+ bool isset_consistency_level = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->keys.clear();
+ uint32_t _size197;
+ ::apache::thrift::protocol::TType _etype200;
+ iprot->readListBegin(_etype200, _size197);
+ this->keys.resize(_size197);
+ uint32_t _i201;
+ for (_i201 = 0; _i201 < _size197; ++_i201)
+ {
+ xfer += iprot->readBinary(this->keys[_i201]);
+ }
+ iprot->readListEnd();
+ }
+ isset_keys = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->column_parent.read(iprot);
+ isset_column_parent = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->predicate.read(iprot);
+ isset_predicate = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast202;
+ xfer += iprot->readI32(ecast202);
+ this->consistency_level = (ConsistencyLevel::type)ecast202;
+ isset_consistency_level = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_keys)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_column_parent)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_predicate)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_consistency_level)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_multiget_count_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_multiget_count_args");
+ xfer += oprot->writeFieldBegin("keys", ::apache::thrift::protocol::T_LIST, 1);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->keys.size()));
+ std::vector<std::string> ::const_iterator _iter203;
+ for (_iter203 = this->keys.begin(); _iter203 != this->keys.end(); ++_iter203)
+ {
+ xfer += oprot->writeBinary((*_iter203));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_parent", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->column_parent.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("predicate", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->predicate.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)this->consistency_level);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_multiget_count_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_multiget_count_pargs");
+ xfer += oprot->writeFieldBegin("keys", ::apache::thrift::protocol::T_LIST, 1);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->keys)).size()));
+ std::vector<std::string> ::const_iterator _iter204;
+ for (_iter204 = (*(this->keys)).begin(); _iter204 != (*(this->keys)).end(); ++_iter204)
+ {
+ xfer += oprot->writeBinary((*_iter204));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_parent", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += (*(this->column_parent)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("predicate", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += (*(this->predicate)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)(*(this->consistency_level)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_multiget_count_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_MAP) {
+ {
+ this->success.clear();
+ uint32_t _size205;
+ ::apache::thrift::protocol::TType _ktype206;
+ ::apache::thrift::protocol::TType _vtype207;
+ iprot->readMapBegin(_ktype206, _vtype207, _size205);
+ uint32_t _i209;
+ for (_i209 = 0; _i209 < _size205; ++_i209)
+ {
+ std::string _key210;
+ xfer += iprot->readBinary(_key210);
+ int32_t& _val211 = this->success[_key210];
+ xfer += iprot->readI32(_val211);
+ }
+ iprot->readMapEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_multiget_count_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_multiget_count_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
+ {
+ xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_I32, static_cast<uint32_t>(this->success.size()));
+ std::map<std::string, int32_t> ::const_iterator _iter212;
+ for (_iter212 = this->success.begin(); _iter212 != this->success.end(); ++_iter212)
+ {
+ xfer += oprot->writeBinary(_iter212->first);
+ xfer += oprot->writeI32(_iter212->second);
+ }
+ xfer += oprot->writeMapEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ue) {
+ xfer += oprot->writeFieldBegin("ue", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->ue.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.te) {
+ xfer += oprot->writeFieldBegin("te", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->te.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_multiget_count_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_MAP) {
+ {
+ (*(this->success)).clear();
+ uint32_t _size213;
+ ::apache::thrift::protocol::TType _ktype214;
+ ::apache::thrift::protocol::TType _vtype215;
+ iprot->readMapBegin(_ktype214, _vtype215, _size213);
+ uint32_t _i217;
+ for (_i217 = 0; _i217 < _size213; ++_i217)
+ {
+ std::string _key218;
+ xfer += iprot->readBinary(_key218);
+ int32_t& _val219 = (*(this->success))[_key218];
+ xfer += iprot->readI32(_val219);
+ }
+ iprot->readMapEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_get_range_slices_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_column_parent = false;
+ bool isset_predicate = false;
+ bool isset_range = false;
+ bool isset_consistency_level = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->column_parent.read(iprot);
+ isset_column_parent = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->predicate.read(iprot);
+ isset_predicate = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->range.read(iprot);
+ isset_range = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast220;
+ xfer += iprot->readI32(ecast220);
+ this->consistency_level = (ConsistencyLevel::type)ecast220;
+ isset_consistency_level = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_column_parent)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_predicate)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_range)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_consistency_level)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_get_range_slices_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_get_range_slices_args");
+ xfer += oprot->writeFieldBegin("column_parent", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->column_parent.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("predicate", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->predicate.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("range", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->range.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)this->consistency_level);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_range_slices_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_get_range_slices_pargs");
+ xfer += oprot->writeFieldBegin("column_parent", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += (*(this->column_parent)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("predicate", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += (*(this->predicate)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("range", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += (*(this->range)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)(*(this->consistency_level)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_range_slices_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->success.clear();
+ uint32_t _size221;
+ ::apache::thrift::protocol::TType _etype224;
+ iprot->readListBegin(_etype224, _size221);
+ this->success.resize(_size221);
+ uint32_t _i225;
+ for (_i225 = 0; _i225 < _size221; ++_i225)
+ {
+ xfer += this->success[_i225].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_get_range_slices_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_get_range_slices_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
+ std::vector<KeySlice> ::const_iterator _iter226;
+ for (_iter226 = this->success.begin(); _iter226 != this->success.end(); ++_iter226)
+ {
+ xfer += (*_iter226).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ue) {
+ xfer += oprot->writeFieldBegin("ue", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->ue.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.te) {
+ xfer += oprot->writeFieldBegin("te", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->te.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_range_slices_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ (*(this->success)).clear();
+ uint32_t _size227;
+ ::apache::thrift::protocol::TType _etype230;
+ iprot->readListBegin(_etype230, _size227);
+ (*(this->success)).resize(_size227);
+ uint32_t _i231;
+ for (_i231 = 0; _i231 < _size227; ++_i231)
+ {
+ xfer += (*(this->success))[_i231].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
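+/*
+ * The _presult variant mirrors _result but stores pointers (note the
+ * (*(this->success)) dereferences above), letting the generated
+ * recv_get_range_slices() deserialise straight into the caller's output
+ * vector. It is only ever read on the client side, which is why no
+ * _presult::write() is emitted.
+ */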
+uint32_t Cassandra_get_paged_slice_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_column_family = false;
+ bool isset_range = false;
+ bool isset_start_column = false;
+ bool isset_consistency_level = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->column_family);
+ isset_column_family = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->range.read(iprot);
+ isset_range = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->start_column);
+ isset_start_column = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast232;
+ xfer += iprot->readI32(ecast232);
+ this->consistency_level = (ConsistencyLevel::type)ecast232;
+ isset_consistency_level = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_column_family)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_range)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_start_column)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_consistency_level)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
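+/*
+ * Args readers validate required fields with local isset_* flags and throw
+ * TProtocolException(INVALID_DATA) after the field loop if any is missing.
+ * Two smaller patterns are also visible above: consistency_level arrives as
+ * a raw i32 and is cast to ConsistencyLevel::type without range checking,
+ * and column_family uses readString() while start_column uses readBinary(),
+ * since Thrift's string and binary types share the T_STRING wire tag and
+ * differ only in how the generated API treats the bytes.
+ */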
+uint32_t Cassandra_get_paged_slice_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_get_paged_slice_args");
+ xfer += oprot->writeFieldBegin("column_family", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->column_family);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("range", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->range.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("start_column", ::apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeBinary(this->start_column);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)this->consistency_level);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_paged_slice_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_get_paged_slice_pargs");
+ xfer += oprot->writeFieldBegin("column_family", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString((*(this->column_family)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("range", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += (*(this->range)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("start_column", ::apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeBinary((*(this->start_column)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)(*(this->consistency_level)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_paged_slice_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->success.clear();
+ uint32_t _size233;
+ ::apache::thrift::protocol::TType _etype236;
+ iprot->readListBegin(_etype236, _size233);
+ this->success.resize(_size233);
+ uint32_t _i237;
+ for (_i237 = 0; _i237 < _size233; ++_i237)
+ {
+ xfer += this->success[_i237].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_get_paged_slice_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_get_paged_slice_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
+ std::vector<KeySlice>::const_iterator _iter238;
+ for (_iter238 = this->success.begin(); _iter238 != this->success.end(); ++_iter238)
+ {
+ xfer += (*_iter238).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ue) {
+ xfer += oprot->writeFieldBegin("ue", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->ue.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.te) {
+ xfer += oprot->writeFieldBegin("te", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->te.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_paged_slice_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ (*(this->success)).clear();
+ uint32_t _size239;
+ ::apache::thrift::protocol::TType _etype242;
+ iprot->readListBegin(_etype242, _size239);
+ (*(this->success)).resize(_size239);
+ uint32_t _i243;
+ for (_i243 = 0; _i243 < _size239; ++_i243)
+ {
+ xfer += (*(this->success))[_i243].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_get_indexed_slices_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_column_parent = false;
+ bool isset_index_clause = false;
+ bool isset_column_predicate = false;
+ bool isset_consistency_level = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->column_parent.read(iprot);
+ isset_column_parent = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->index_clause.read(iprot);
+ isset_index_clause = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->column_predicate.read(iprot);
+ isset_column_predicate = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast244;
+ xfer += iprot->readI32(ecast244);
+ this->consistency_level = (ConsistencyLevel::type)ecast244;
+ isset_consistency_level = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_column_parent)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_index_clause)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_column_predicate)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_consistency_level)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_get_indexed_slices_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_get_indexed_slices_args");
+ xfer += oprot->writeFieldBegin("column_parent", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->column_parent.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("index_clause", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->index_clause.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_predicate", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->column_predicate.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)this->consistency_level);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_indexed_slices_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_get_indexed_slices_pargs");
+ xfer += oprot->writeFieldBegin("column_parent", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += (*(this->column_parent)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("index_clause", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += (*(this->index_clause)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_predicate", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += (*(this->column_predicate)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)(*(this->consistency_level)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_indexed_slices_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->success.clear();
+ uint32_t _size245;
+ ::apache::thrift::protocol::TType _etype248;
+ iprot->readListBegin(_etype248, _size245);
+ this->success.resize(_size245);
+ uint32_t _i249;
+ for (_i249 = 0; _i249 < _size245; ++_i249)
+ {
+ xfer += this->success[_i249].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_get_indexed_slices_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_get_indexed_slices_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
+ std::vector<KeySlice>::const_iterator _iter250;
+ for (_iter250 = this->success.begin(); _iter250 != this->success.end(); ++_iter250)
+ {
+ xfer += (*_iter250).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ue) {
+ xfer += oprot->writeFieldBegin("ue", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->ue.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.te) {
+ xfer += oprot->writeFieldBegin("te", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->te.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_get_indexed_slices_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ (*(this->success)).clear();
+ uint32_t _size251;
+ ::apache::thrift::protocol::TType _etype254;
+ iprot->readListBegin(_etype254, _size251);
+ (*(this->success)).resize(_size251);
+ uint32_t _i255;
+ for (_i255 = 0; _i255 < _size251; ++_i255)
+ {
+ xfer += (*(this->success))[_i255].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_insert_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_key = false;
+ bool isset_column_parent = false;
+ bool isset_column = false;
+ bool isset_consistency_level = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->key);
+ isset_key = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->column_parent.read(iprot);
+ isset_column_parent = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->column.read(iprot);
+ isset_column = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast256;
+ xfer += iprot->readI32(ecast256);
+ this->consistency_level = (ConsistencyLevel::type)ecast256;
+ isset_consistency_level = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_key)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_column_parent)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_column)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_consistency_level)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_insert_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_insert_args");
+ xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->key);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_parent", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->column_parent.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->column.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)this->consistency_level);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_insert_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_insert_pargs");
+ xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary((*(this->key)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_parent", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += (*(this->column_parent)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += (*(this->column)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)(*(this->consistency_level)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
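+/*
+ * How these structs fit the client send path, as a sketch only: the member
+ * names (oprot_, cseqid) follow common Thrift codegen conventions and are
+ * assumptions here; the generated send_insert() may differ in detail.
+ *
+ *   int32_t cseqid = 0;
+ *   oprot_->writeMessageBegin("insert", ::apache::thrift::protocol::T_CALL, cseqid);
+ *   Cassandra_insert_pargs args;
+ *   args.key = &key;
+ *   args.column_parent = &column_parent;
+ *   args.column = &column;
+ *   args.consistency_level = &consistency_level;
+ *   args.write(oprot_);
+ *   oprot_->writeMessageEnd();
+ *   oprot_->getTransport()->flush();
+ */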
+uint32_t Cassandra_insert_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_insert_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_insert_result");
+
+ if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ue) {
+ xfer += oprot->writeFieldBegin("ue", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->ue.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.te) {
+ xfer += oprot->writeFieldBegin("te", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->te.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_insert_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
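+/*
+ * insert() returns void, so its _result/_presult carry no field id 0: a
+ * successful reply is simply a struct containing none of the declared
+ * exceptions, and the generated recv_insert() returns normally when every
+ * __isset flag stays false.
+ */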
+uint32_t Cassandra_add_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_key = false;
+ bool isset_column_parent = false;
+ bool isset_column = false;
+ bool isset_consistency_level = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->key);
+ isset_key = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->column_parent.read(iprot);
+ isset_column_parent = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->column.read(iprot);
+ isset_column = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast257;
+ xfer += iprot->readI32(ecast257);
+ this->consistency_level = (ConsistencyLevel::type)ecast257;
+ isset_consistency_level = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_key)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_column_parent)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_column)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_consistency_level)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_add_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_add_args");
+ xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->key);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_parent", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->column_parent.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->column.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)this->consistency_level);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_add_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_add_pargs");
+ xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary((*(this->key)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_parent", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += (*(this->column_parent)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += (*(this->column)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)(*(this->consistency_level)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_add_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_add_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_add_result");
+
+ if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ue) {
+ xfer += oprot->writeFieldBegin("ue", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->ue.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.te) {
+ xfer += oprot->writeFieldBegin("te", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->te.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_add_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_remove_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_key = false;
+ bool isset_column_path = false;
+ bool isset_timestamp = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->key);
+ isset_key = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->column_path.read(iprot);
+ isset_column_path = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_I64) {
+ xfer += iprot->readI64(this->timestamp);
+ isset_timestamp = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast258;
+ xfer += iprot->readI32(ecast258);
+ this->consistency_level = (ConsistencyLevel::type)ecast258;
+ this->__isset.consistency_level = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_key)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_column_path)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_timestamp)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
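+/*
+ * remove() is the first call here whose consistency_level is not required
+ * (in cassandra.thrift it is optional with a default of ONE), so the reader
+ * records it in this->__isset.consistency_level rather than a local isset_*
+ * flag, and the post-loop validation deliberately skips it.
+ */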
+uint32_t Cassandra_remove_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_remove_args");
+ xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->key);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_path", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->column_path.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("timestamp", ::apache::thrift::protocol::T_I64, 3);
+ xfer += oprot->writeI64(this->timestamp);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)this->consistency_level);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_remove_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_remove_pargs");
+ xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary((*(this->key)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("column_path", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += (*(this->column_path)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("timestamp", ::apache::thrift::protocol::T_I64, 3);
+ xfer += oprot->writeI64((*(this->timestamp)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((int32_t)(*(this->consistency_level)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_remove_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_remove_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_remove_result");
+
+ if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ue) {
+ xfer += oprot->writeFieldBegin("ue", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->ue.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.te) {
+ xfer += oprot->writeFieldBegin("te", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->te.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_remove_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_remove_counter_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_key = false;
+ bool isset_path = false;
+ bool isset_consistency_level = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->key);
+ isset_key = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->path.read(iprot);
+ isset_path = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast259;
+ xfer += iprot->readI32(ecast259);
+ this->consistency_level = (ConsistencyLevel::type)ecast259;
+ isset_consistency_level = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_key)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_path)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_consistency_level)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_remove_counter_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_remove_counter_args");
+ xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->key);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("path", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->path.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 3);
+ xfer += oprot->writeI32((int32_t)this->consistency_level);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_remove_counter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_remove_counter_pargs");
+ xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary((*(this->key)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("path", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += (*(this->path)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 3);
+ xfer += oprot->writeI32((int32_t)(*(this->consistency_level)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_remove_counter_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_remove_counter_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_remove_counter_result");
+
+ if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ue) {
+ xfer += oprot->writeFieldBegin("ue", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->ue.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.te) {
+ xfer += oprot->writeFieldBegin("te", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->te.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_remove_counter_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_batch_mutate_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_mutation_map = false;
+ bool isset_consistency_level = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_MAP) {
+ {
+ this->mutation_map.clear();
+ uint32_t _size260;
+ ::apache::thrift::protocol::TType _ktype261;
+ ::apache::thrift::protocol::TType _vtype262;
+ iprot->readMapBegin(_ktype261, _vtype262, _size260);
+ uint32_t _i264;
+ for (_i264 = 0; _i264 < _size260; ++_i264)
+ {
+ std::string _key265;
+ xfer += iprot->readBinary(_key265);
+ std::map<std::string, std::vector<Mutation> > & _val266 = this->mutation_map[_key265];
+ {
+ _val266.clear();
+ uint32_t _size267;
+ ::apache::thrift::protocol::TType _ktype268;
+ ::apache::thrift::protocol::TType _vtype269;
+ iprot->readMapBegin(_ktype268, _vtype269, _size267);
+ uint32_t _i271;
+ for (_i271 = 0; _i271 < _size267; ++_i271)
+ {
+ std::string _key272;
+ xfer += iprot->readString(_key272);
+ std::vector<Mutation> & _val273 = _val266[_key272];
+ {
+ _val273.clear();
+ uint32_t _size274;
+ ::apache::thrift::protocol::TType _etype277;
+ iprot->readListBegin(_etype277, _size274);
+ _val273.resize(_size274);
+ uint32_t _i278;
+ for (_i278 = 0; _i278 < _size274; ++_i278)
+ {
+ xfer += _val273[_i278].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ }
+ iprot->readMapEnd();
+ }
+ }
+ iprot->readMapEnd();
+ }
+ isset_mutation_map = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast279;
+ xfer += iprot->readI32(ecast279);
+ this->consistency_level = (ConsistencyLevel::type)ecast279;
+ isset_consistency_level = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_mutation_map)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_consistency_level)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
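+/*
+ * batch_mutate's mutation_map is the deepest container in the API:
+ * map<binary key, map<string column_family, list<Mutation>>>. The reader
+ * unrolls it as three nested begin/end pairs, materialising each level
+ * through the numbered temporaries (_key265 -> _val266 -> _val273). The
+ * element-type tags returned by readMapBegin/readListBegin are accepted
+ * as-is rather than checked against the expected types.
+ */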
+uint32_t Cassandra_batch_mutate_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_batch_mutate_args");
+ xfer += oprot->writeFieldBegin("mutation_map", ::apache::thrift::protocol::T_MAP, 1);
+ {
+ xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_MAP, static_cast<uint32_t>(this->mutation_map.size()));
+ std::map<std::string, std::map<std::string, std::vector<Mutation> > >::const_iterator _iter280;
+ for (_iter280 = this->mutation_map.begin(); _iter280 != this->mutation_map.end(); ++_iter280)
+ {
+ xfer += oprot->writeBinary(_iter280->first);
+ {
+ xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(_iter280->second.size()));
+ std::map<std::string, std::vector<Mutation> >::const_iterator _iter281;
+ for (_iter281 = _iter280->second.begin(); _iter281 != _iter280->second.end(); ++_iter281)
+ {
+ xfer += oprot->writeString(_iter281->first);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(_iter281->second.size()));
+ std::vector<Mutation>::const_iterator _iter282;
+ for (_iter282 = _iter281->second.begin(); _iter282 != _iter281->second.end(); ++_iter282)
+ {
+ xfer += (*_iter282).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ }
+ xfer += oprot->writeMapEnd();
+ }
+ }
+ xfer += oprot->writeMapEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 2);
+ xfer += oprot->writeI32((int32_t)this->consistency_level);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_batch_mutate_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_batch_mutate_pargs");
+ xfer += oprot->writeFieldBegin("mutation_map", ::apache::thrift::protocol::T_MAP, 1);
+ {
+ xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_MAP, static_cast<uint32_t>((*(this->mutation_map)).size()));
+ std::map<std::string, std::map<std::string, std::vector<Mutation> > >::const_iterator _iter283;
+ for (_iter283 = (*(this->mutation_map)).begin(); _iter283 != (*(this->mutation_map)).end(); ++_iter283)
+ {
+ xfer += oprot->writeBinary(_iter283->first);
+ {
+ xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(_iter283->second.size()));
+ std::map<std::string, std::vector<Mutation> >::const_iterator _iter284;
+ for (_iter284 = _iter283->second.begin(); _iter284 != _iter283->second.end(); ++_iter284)
+ {
+ xfer += oprot->writeString(_iter284->first);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(_iter284->second.size()));
+ std::vector<Mutation>::const_iterator _iter285;
+ for (_iter285 = _iter284->second.begin(); _iter285 != _iter284->second.end(); ++_iter285)
+ {
+ xfer += (*_iter285).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ }
+ xfer += oprot->writeMapEnd();
+ }
+ }
+ xfer += oprot->writeMapEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("consistency_level", ::apache::thrift::protocol::T_I32, 2);
+ xfer += oprot->writeI32((int32_t)(*(this->consistency_level)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_batch_mutate_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_batch_mutate_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_batch_mutate_result");
+
+ if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ue) {
+ xfer += oprot->writeFieldBegin("ue", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->ue.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.te) {
+ xfer += oprot->writeFieldBegin("te", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->te.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_batch_mutate_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_truncate_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_cfname = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->cfname);
+ isset_cfname = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_cfname)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_truncate_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_truncate_args");
+ xfer += oprot->writeFieldBegin("cfname", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->cfname);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_truncate_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_truncate_pargs");
+ xfer += oprot->writeFieldBegin("cfname", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString((*(this->cfname)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_truncate_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_truncate_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_truncate_result");
+
+ if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ue) {
+ xfer += oprot->writeFieldBegin("ue", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->ue.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.te) {
+ xfer += oprot->writeFieldBegin("te", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->te.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_truncate_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
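+// A minimal sketch of how a generated client drives these helpers (the
+// surrounding message framing and the oprot/iprot protocol objects are
+// assumed, not shown here):
+//
+//   std::string cfname = "my_cf";
+//   Cassandra_truncate_pargs args;
+//   args.cfname = &cfname;              // pointer member, no copy
+//   args.write(oprot);                  // arguments -> wire
+//   ...
+//   Cassandra_truncate_presult result;
+//   result.read(iprot);                 // wire -> reply
+//   if (result.__isset.ire) throw result.ire;
+//
+// Calls that take no arguments, such as describe_schema_versions below,
+// still get (empty) args/pargs structs so this framing stays uniform.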
+uint32_t Cassandra_describe_schema_versions_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_schema_versions_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_schema_versions_args");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_schema_versions_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_schema_versions_pargs");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
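+// Field id 0 in a result struct is always the return value ("success").
+// Containers are decoded with block-scoped temporaries: readMapBegin() /
+// readListBegin() yield the declared element types and the element count,
+// the vector is resize()d up front, and elements are then read in place.
+// The numeric suffixes (_size286, _ktype287, ...) are just the generator's
+// way of keeping the temporaries unique.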
+uint32_t Cassandra_describe_schema_versions_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_MAP) {
+ {
+ this->success.clear();
+ uint32_t _size286;
+ ::apache::thrift::protocol::TType _ktype287;
+ ::apache::thrift::protocol::TType _vtype288;
+ iprot->readMapBegin(_ktype287, _vtype288, _size286);
+ uint32_t _i290;
+ for (_i290 = 0; _i290 < _size286; ++_i290)
+ {
+ std::string _key291;
+ xfer += iprot->readString(_key291);
+ std::vector<std::string> & _val292 = this->success[_key291];
+ {
+ _val292.clear();
+ uint32_t _size293;
+ ::apache::thrift::protocol::TType _etype296;
+ iprot->readListBegin(_etype296, _size293);
+ _val292.resize(_size293);
+ uint32_t _i297;
+ for (_i297 = 0; _i297 < _size293; ++_i297)
+ {
+ xfer += iprot->readString(_val292[_i297]);
+ }
+ iprot->readListEnd();
+ }
+ }
+ iprot->readMapEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_schema_versions_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_describe_schema_versions_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
+ {
+ xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->success.size()));
+ std::map<std::string, std::vector<std::string> > ::const_iterator _iter298;
+ for (_iter298 = this->success.begin(); _iter298 != this->success.end(); ++_iter298)
+ {
+ xfer += oprot->writeString(_iter298->first);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(_iter298->second.size()));
+ std::vector<std::string> ::const_iterator _iter299;
+ for (_iter299 = _iter298->second.begin(); _iter299 != _iter298->second.end(); ++_iter299)
+ {
+ xfer += oprot->writeString((*_iter299));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ }
+ xfer += oprot->writeMapEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_schema_versions_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_MAP) {
+ {
+ (*(this->success)).clear();
+ uint32_t _size300;
+ ::apache::thrift::protocol::TType _ktype301;
+ ::apache::thrift::protocol::TType _vtype302;
+ iprot->readMapBegin(_ktype301, _vtype302, _size300);
+ uint32_t _i304;
+ for (_i304 = 0; _i304 < _size300; ++_i304)
+ {
+ std::string _key305;
+ xfer += iprot->readString(_key305);
+ std::vector<std::string> & _val306 = (*(this->success))[_key305];
+ {
+ _val306.clear();
+ uint32_t _size307;
+ ::apache::thrift::protocol::TType _etype310;
+ iprot->readListBegin(_etype310, _size307);
+ _val306.resize(_size307);
+ uint32_t _i311;
+ for (_i311 = 0; _i311 < _size307; ++_i311)
+ {
+ xfer += iprot->readString(_val306[_i311]);
+ }
+ iprot->readListEnd();
+ }
+ }
+ iprot->readMapEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_keyspaces_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_keyspaces_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_keyspaces_args");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_keyspaces_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_keyspaces_pargs");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
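+// For containers of structs, such as the std::vector<KsDef> below, each
+// element delegates to its own generated read()/write(), so nesting to any
+// depth reuses the same machinery.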
+uint32_t Cassandra_describe_keyspaces_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->success.clear();
+ uint32_t _size312;
+ ::apache::thrift::protocol::TType _etype315;
+ iprot->readListBegin(_etype315, _size312);
+ this->success.resize(_size312);
+ uint32_t _i316;
+ for (_i316 = 0; _i316 < _size312; ++_i316)
+ {
+ xfer += this->success[_i316].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_keyspaces_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_describe_keyspaces_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
+ std::vector<KsDef> ::const_iterator _iter317;
+ for (_iter317 = this->success.begin(); _iter317 != this->success.end(); ++_iter317)
+ {
+ xfer += (*_iter317).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_keyspaces_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ (*(this->success)).clear();
+ uint32_t _size318;
+ ::apache::thrift::protocol::TType _etype321;
+ iprot->readListBegin(_etype321, _size318);
+ (*(this->success)).resize(_size318);
+ uint32_t _i322;
+ for (_i322 = 0; _i322 < _size318; ++_i322)
+ {
+ xfer += (*(this->success))[_i322].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_cluster_name_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_cluster_name_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_cluster_name_args");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_cluster_name_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_cluster_name_pargs");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_cluster_name_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->success);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_cluster_name_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_describe_cluster_name_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRING, 0);
+ xfer += oprot->writeString(this->success);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_cluster_name_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString((*(this->success)));
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_version_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_version_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_version_args");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_version_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_version_pargs");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_version_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->success);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_version_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_describe_version_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRING, 0);
+ xfer += oprot->writeString(this->success);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_version_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString((*(this->success)));
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_ring_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_keyspace = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->keyspace);
+ isset_keyspace = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_keyspace)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_describe_ring_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_ring_args");
+ xfer += oprot->writeFieldBegin("keyspace", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->keyspace);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_ring_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_ring_pargs");
+ xfer += oprot->writeFieldBegin("keyspace", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString((*(this->keyspace)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_ring_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->success.clear();
+ uint32_t _size323;
+ ::apache::thrift::protocol::TType _etype326;
+ iprot->readListBegin(_etype326, _size323);
+ this->success.resize(_size323);
+ uint32_t _i327;
+ for (_i327 = 0; _i327 < _size323; ++_i327)
+ {
+ xfer += this->success[_i327].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_ring_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_describe_ring_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
+ std::vector<TokenRange> ::const_iterator _iter328;
+ for (_iter328 = this->success.begin(); _iter328 != this->success.end(); ++_iter328)
+ {
+ xfer += (*_iter328).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_ring_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ (*(this->success)).clear();
+ uint32_t _size329;
+ ::apache::thrift::protocol::TType _etype332;
+ iprot->readListBegin(_etype332, _size329);
+ (*(this->success)).resize(_size329);
+ uint32_t _i333;
+ for (_i333 = 0; _i333 < _size329; ++_i333)
+ {
+ xfer += (*(this->success))[_i333].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_token_map_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_token_map_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_token_map_args");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_token_map_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_token_map_pargs");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_token_map_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_MAP) {
+ {
+ this->success.clear();
+ uint32_t _size334;
+ ::apache::thrift::protocol::TType _ktype335;
+ ::apache::thrift::protocol::TType _vtype336;
+ iprot->readMapBegin(_ktype335, _vtype336, _size334);
+ uint32_t _i338;
+ for (_i338 = 0; _i338 < _size334; ++_i338)
+ {
+ std::string _key339;
+ xfer += iprot->readString(_key339);
+ std::string& _val340 = this->success[_key339];
+ xfer += iprot->readString(_val340);
+ }
+ iprot->readMapEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_token_map_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_describe_token_map_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
+ {
+ xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
+ std::map<std::string, std::string> ::const_iterator _iter341;
+ for (_iter341 = this->success.begin(); _iter341 != this->success.end(); ++_iter341)
+ {
+ xfer += oprot->writeString(_iter341->first);
+ xfer += oprot->writeString(_iter341->second);
+ }
+ xfer += oprot->writeMapEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_token_map_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_MAP) {
+ {
+ (*(this->success)).clear();
+ uint32_t _size342;
+ ::apache::thrift::protocol::TType _ktype343;
+ ::apache::thrift::protocol::TType _vtype344;
+ iprot->readMapBegin(_ktype343, _vtype344, _size342);
+ uint32_t _i346;
+ for (_i346 = 0; _i346 < _size342; ++_i346)
+ {
+ std::string _key347;
+ xfer += iprot->readString(_key347);
+ std::string& _val348 = (*(this->success))[_key347];
+ xfer += iprot->readString(_val348);
+ }
+ iprot->readMapEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_partitioner_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_partitioner_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_partitioner_args");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_partitioner_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_partitioner_pargs");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_partitioner_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->success);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_partitioner_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_describe_partitioner_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRING, 0);
+ xfer += oprot->writeString(this->success);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_partitioner_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString((*(this->success)));
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_snitch_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_snitch_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_snitch_args");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_snitch_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_snitch_pargs");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_snitch_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->success);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_snitch_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_describe_snitch_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRING, 0);
+ xfer += oprot->writeString(this->success);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_snitch_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString((*(this->success)));
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_keyspace_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_keyspace = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->keyspace);
+ isset_keyspace = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_keyspace)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_describe_keyspace_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_keyspace_args");
+ xfer += oprot->writeFieldBegin("keyspace", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->keyspace);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_keyspace_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_keyspace_pargs");
+ xfer += oprot->writeFieldBegin("keyspace", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString((*(this->keyspace)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
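+// describe_keyspace can raise two exceptions; they occupy distinct field ids
+// (nfe at 1, ire at 2) and keep the usual at-most-one-is-set convention in
+// write().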
+uint32_t Cassandra_describe_keyspace_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->success.read(iprot);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->nfe.read(iprot);
+ this->__isset.nfe = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_keyspace_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_describe_keyspace_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
+ xfer += this->success.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.nfe) {
+ xfer += oprot->writeFieldBegin("nfe", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->nfe.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_keyspace_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += (*(this->success)).read(iprot);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->nfe.read(iprot);
+ this->__isset.nfe = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
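+// With several required fields, each one gets its own isset_* flag and its
+// own INVALID_DATA check after the read loop, so any single missing field is
+// enough to reject the struct.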
+uint32_t Cassandra_describe_splits_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_cfName = false;
+ bool isset_start_token = false;
+ bool isset_end_token = false;
+ bool isset_keys_per_split = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->cfName);
+ isset_cfName = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->start_token);
+ isset_start_token = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->end_token);
+ isset_end_token = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->keys_per_split);
+ isset_keys_per_split = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_cfName)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_start_token)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_end_token)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_keys_per_split)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_describe_splits_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_splits_args");
+ xfer += oprot->writeFieldBegin("cfName", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->cfName);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("start_token", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->start_token);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("end_token", ::apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeString(this->end_token);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("keys_per_split", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32(this->keys_per_split);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_splits_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_describe_splits_pargs");
+ xfer += oprot->writeFieldBegin("cfName", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString((*(this->cfName)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("start_token", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString((*(this->start_token)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("end_token", ::apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeString((*(this->end_token)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("keys_per_split", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32((*(this->keys_per_split)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_splits_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->success.clear();
+ uint32_t _size349;
+ ::apache::thrift::protocol::TType _etype352;
+ iprot->readListBegin(_etype352, _size349);
+ this->success.resize(_size349);
+ uint32_t _i353;
+ for (_i353 = 0; _i353 < _size349; ++_i353)
+ {
+ xfer += iprot->readString(this->success[_i353]);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_describe_splits_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_describe_splits_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
+ std::vector<std::string> ::const_iterator _iter354;
+ for (_iter354 = this->success.begin(); _iter354 != this->success.end(); ++_iter354)
+ {
+ xfer += oprot->writeString((*_iter354));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_describe_splits_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ (*(this->success)).clear();
+ uint32_t _size355;
+ ::apache::thrift::protocol::TType _etype358;
+ iprot->readListBegin(_etype358, _size355);
+ (*(this->success)).resize(_size355);
+ uint32_t _i359;
+ for (_i359 = 0; _i359 < _size355; ++_i359)
+ {
+ xfer += iprot->readString((*(this->success))[_i359]);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
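+// Struct-valued arguments such as cf_def are serialized by delegating to the
+// argument type's own generated write() (presumably CfDef here); the pargs
+// variant merely dereferences its pointer before delegating.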
+uint32_t Cassandra_system_add_column_family_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_cf_def = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->cf_def.read(iprot);
+ isset_cf_def = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_cf_def)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_system_add_column_family_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_system_add_column_family_args");
+ xfer += oprot->writeFieldBegin("cf_def", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->cf_def.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_system_add_column_family_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_system_add_column_family_pargs");
+ xfer += oprot->writeFieldBegin("cf_def", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += (*(this->cf_def)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_system_add_column_family_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->success);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->sde.read(iprot);
+ this->__isset.sde = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_system_add_column_family_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_system_add_column_family_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRING, 0);
+ xfer += oprot->writeString(this->success);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.sde) {
+ xfer += oprot->writeFieldBegin("sde", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->sde.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_system_add_column_family_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString((*(this->success)));
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->sde.read(iprot);
+ this->__isset.sde = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_system_drop_column_family_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_column_family = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->column_family);
+ isset_column_family = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_column_family)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_system_drop_column_family_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_system_drop_column_family_args");
+ xfer += oprot->writeFieldBegin("column_family", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->column_family);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_system_drop_column_family_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_system_drop_column_family_pargs");
+ xfer += oprot->writeFieldBegin("column_family", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString((*(this->column_family)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_system_drop_column_family_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->success);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->sde.read(iprot);
+ this->__isset.sde = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_system_drop_column_family_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_system_drop_column_family_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRING, 0);
+ xfer += oprot->writeString(this->success);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.sde) {
+ xfer += oprot->writeFieldBegin("sde", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->sde.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
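+// _presult is the receiving-side twin of _result: its success member is a
+// pointer the caller aims at the output variable, so read() deserializes
+// the reply directly into it.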
+uint32_t Cassandra_system_drop_column_family_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString((*(this->success)));
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->sde.read(iprot);
+ this->__isset.sde = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_system_add_keyspace_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_ks_def = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ks_def.read(iprot);
+ isset_ks_def = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_ks_def)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_system_add_keyspace_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_system_add_keyspace_args");
+ xfer += oprot->writeFieldBegin("ks_def", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ks_def.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_system_add_keyspace_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_system_add_keyspace_pargs");
+ xfer += oprot->writeFieldBegin("ks_def", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += (*(this->ks_def)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_system_add_keyspace_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->success);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->sde.read(iprot);
+ this->__isset.sde = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_system_add_keyspace_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_system_add_keyspace_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRING, 0);
+ xfer += oprot->writeString(this->success);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.sde) {
+ xfer += oprot->writeFieldBegin("sde", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->sde.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_system_add_keyspace_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString((*(this->success)));
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->sde.read(iprot);
+ this->__isset.sde = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_system_drop_keyspace_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_keyspace = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->keyspace);
+ isset_keyspace = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_keyspace)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_system_drop_keyspace_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_system_drop_keyspace_args");
+ xfer += oprot->writeFieldBegin("keyspace", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->keyspace);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_system_drop_keyspace_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_system_drop_keyspace_pargs");
+ xfer += oprot->writeFieldBegin("keyspace", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString((*(this->keyspace)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_system_drop_keyspace_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->success);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->sde.read(iprot);
+ this->__isset.sde = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_system_drop_keyspace_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_system_drop_keyspace_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRING, 0);
+ xfer += oprot->writeString(this->success);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.sde) {
+ xfer += oprot->writeFieldBegin("sde", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->sde.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_system_drop_keyspace_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString((*(this->success)));
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->sde.read(iprot);
+ this->__isset.sde = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_system_update_keyspace_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_ks_def = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ks_def.read(iprot);
+ isset_ks_def = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_ks_def)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_system_update_keyspace_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_system_update_keyspace_args");
+ xfer += oprot->writeFieldBegin("ks_def", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ks_def.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_system_update_keyspace_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_system_update_keyspace_pargs");
+ xfer += oprot->writeFieldBegin("ks_def", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += (*(this->ks_def)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_system_update_keyspace_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->success);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->sde.read(iprot);
+ this->__isset.sde = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_system_update_keyspace_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_system_update_keyspace_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRING, 0);
+ xfer += oprot->writeString(this->success);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.sde) {
+ xfer += oprot->writeFieldBegin("sde", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->sde.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_system_update_keyspace_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString((*(this->success)));
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->sde.read(iprot);
+ this->__isset.sde = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_system_update_column_family_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_cf_def = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->cf_def.read(iprot);
+ isset_cf_def = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_cf_def)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_system_update_column_family_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_system_update_column_family_args");
+ xfer += oprot->writeFieldBegin("cf_def", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->cf_def.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_system_update_column_family_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_system_update_column_family_pargs");
+ xfer += oprot->writeFieldBegin("cf_def", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += (*(this->cf_def)).write(oprot);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_system_update_column_family_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->success);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->sde.read(iprot);
+ this->__isset.sde = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_system_update_column_family_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_system_update_column_family_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRING, 0);
+ xfer += oprot->writeString(this->success);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.sde) {
+ xfer += oprot->writeFieldBegin("sde", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->sde.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_system_update_column_family_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString((*(this->success)));
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->sde.read(iprot);
+ this->__isset.sde = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
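+// execute_cql_query carries the query text as an opaque binary field, and
+// the Compression enum travels on the wire as a plain i32 that is cast back
+// to Compression::type on arrival (the ecast360 temporary below).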
+uint32_t Cassandra_execute_cql_query_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_query = false;
+ bool isset_compression = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->query);
+ isset_query = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast360;
+ xfer += iprot->readI32(ecast360);
+ this->compression = (Compression::type)ecast360;
+ isset_compression = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_query)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_compression)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_execute_cql_query_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_execute_cql_query_args");
+ xfer += oprot->writeFieldBegin("query", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->query);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("compression", ::apache::thrift::protocol::T_I32, 2);
+ xfer += oprot->writeI32((int32_t)this->compression);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_execute_cql_query_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_execute_cql_query_pargs");
+ xfer += oprot->writeFieldBegin("query", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary((*(this->query)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("compression", ::apache::thrift::protocol::T_I32, 2);
+ xfer += oprot->writeI32((int32_t)(*(this->compression)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_execute_cql_query_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->success.read(iprot);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->sde.read(iprot);
+ this->__isset.sde = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_execute_cql_query_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_execute_cql_query_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
+ xfer += this->success.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ue) {
+ xfer += oprot->writeFieldBegin("ue", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->ue.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.te) {
+ xfer += oprot->writeFieldBegin("te", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->te.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.sde) {
+ xfer += oprot->writeFieldBegin("sde", ::apache::thrift::protocol::T_STRUCT, 4);
+ xfer += this->sde.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_execute_cql_query_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += (*(this->success)).read(iprot);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->sde.read(iprot);
+ this->__isset.sde = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_prepare_cql_query_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_query = false;
+ bool isset_compression = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->query);
+ isset_query = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast361;
+ xfer += iprot->readI32(ecast361);
+ this->compression = (Compression::type)ecast361;
+ isset_compression = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_query)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_compression)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_prepare_cql_query_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_prepare_cql_query_args");
+ xfer += oprot->writeFieldBegin("query", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->query);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("compression", ::apache::thrift::protocol::T_I32, 2);
+ xfer += oprot->writeI32((int32_t)this->compression);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_prepare_cql_query_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_prepare_cql_query_pargs");
+ xfer += oprot->writeFieldBegin("query", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary((*(this->query)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("compression", ::apache::thrift::protocol::T_I32, 2);
+ xfer += oprot->writeI32((int32_t)(*(this->compression)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_prepare_cql_query_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->success.read(iprot);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_prepare_cql_query_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_prepare_cql_query_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
+ xfer += this->success.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_prepare_cql_query_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += (*(this->success)).read(iprot);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
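+// Thrift lists are framed as (element type, element count) followed by the
+// elements themselves, so the reader below can resize the vector up front
+// and fill it in a single pass of readBinary() calls.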
+uint32_t Cassandra_execute_prepared_cql_query_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_itemId = false;
+ bool isset_values = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->itemId);
+ isset_itemId = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->values.clear();
+ uint32_t _size362;
+ ::apache::thrift::protocol::TType _etype365;
+ iprot->readListBegin(_etype365, _size362);
+ this->values.resize(_size362);
+ uint32_t _i366;
+ for (_i366 = 0; _i366 < _size362; ++_i366)
+ {
+ xfer += iprot->readBinary(this->values[_i366]);
+ }
+ iprot->readListEnd();
+ }
+ isset_values = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_itemId)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_values)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_execute_prepared_cql_query_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_execute_prepared_cql_query_args");
+ xfer += oprot->writeFieldBegin("itemId", ::apache::thrift::protocol::T_I32, 1);
+ xfer += oprot->writeI32(this->itemId);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 2);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->values.size()));
+ std::vector<std::string>::const_iterator _iter367;
+ for (_iter367 = this->values.begin(); _iter367 != this->values.end(); ++_iter367)
+ {
+ xfer += oprot->writeBinary((*_iter367));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_execute_prepared_cql_query_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_execute_prepared_cql_query_pargs");
+ xfer += oprot->writeFieldBegin("itemId", ::apache::thrift::protocol::T_I32, 1);
+ xfer += oprot->writeI32((*(this->itemId)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 2);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->values)).size()));
+ std::vector<std::string>::const_iterator _iter368;
+ for (_iter368 = (*(this->values)).begin(); _iter368 != (*(this->values)).end(); ++_iter368)
+ {
+ xfer += oprot->writeBinary((*_iter368));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_execute_prepared_cql_query_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->success.read(iprot);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->sde.read(iprot);
+ this->__isset.sde = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_execute_prepared_cql_query_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_execute_prepared_cql_query_result");
+
+ if (this->__isset.success) {
+ xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
+ xfer += this->success.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.ue) {
+ xfer += oprot->writeFieldBegin("ue", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->ue.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.te) {
+ xfer += oprot->writeFieldBegin("te", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->te.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ } else if (this->__isset.sde) {
+ xfer += oprot->writeFieldBegin("sde", ::apache::thrift::protocol::T_STRUCT, 4);
+ xfer += this->sde.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_execute_prepared_cql_query_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 0:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += (*(this->success)).read(iprot);
+ this->__isset.success = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ue.read(iprot);
+ this->__isset.ue = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->te.read(iprot);
+ this->__isset.te = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->sde.read(iprot);
+ this->__isset.sde = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_set_cql_version_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_version = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->version);
+ isset_version = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_version)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Cassandra_set_cql_version_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_set_cql_version_args");
+ xfer += oprot->writeFieldBegin("version", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->version);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_set_cql_version_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Cassandra_set_cql_version_pargs");
+ xfer += oprot->writeFieldBegin("version", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString((*(this->version)));
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_set_cql_version_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Cassandra_set_cql_version_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+ uint32_t xfer = 0;
+
+ xfer += oprot->writeStructBegin("Cassandra_set_cql_version_result");
+
+ if (this->__isset.ire) {
+ xfer += oprot->writeFieldBegin("ire", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->ire.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+uint32_t Cassandra_set_cql_version_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->ire.read(iprot);
+ this->__isset.ire = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
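+// Client methods: each synchronous RPC is a send_/recv_ pair. send_* wraps
+// a _pargs struct in a T_CALL message and flushes the transport; recv_*
+// reads the matching reply into a _presult and rethrows any exception the
+// service declared. A T_EXCEPTION frame becomes a TApplicationException;
+// note that the mtype/fname mismatch branches skip the payload but still
+// fall through to result.read().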
+void CassandraClient::login(const AuthenticationRequest& auth_request)
+{
+ send_login(auth_request);
+ recv_login();
+}
+
+void CassandraClient::send_login(const AuthenticationRequest& auth_request)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("login", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_login_pargs args;
+ args.auth_request = &auth_request;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_login()
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("login") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_login_presult result;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.authnx) {
+ throw result.authnx;
+ }
+ if (result.__isset.authzx) {
+ throw result.authzx;
+ }
+ return;
+}
+
+void CassandraClient::set_keyspace(const std::string& keyspace)
+{
+ send_set_keyspace(keyspace);
+ recv_set_keyspace();
+}
+
+void CassandraClient::send_set_keyspace(const std::string& keyspace)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("set_keyspace", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_set_keyspace_pargs args;
+ args.keyspace = &keyspace;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_set_keyspace()
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("set_keyspace") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_set_keyspace_presult result;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ return;
+}
+
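+// Methods with non-void results take a _return out-parameter; recv_* wires
+// it into result.success so deserialization writes the answer in place, and
+// a reply that sets neither success nor a declared exception surfaces as
+// TApplicationException::MISSING_RESULT.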
+void CassandraClient::get(ColumnOrSuperColumn& _return, const std::string& key, const ColumnPath& column_path, const ConsistencyLevel::type consistency_level)
+{
+ send_get(key, column_path, consistency_level);
+ recv_get(_return);
+}
+
+void CassandraClient::send_get(const std::string& key, const ColumnPath& column_path, const ConsistencyLevel::type consistency_level)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("get", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_get_pargs args;
+ args.key = &key;
+ args.column_path = &column_path;
+ args.consistency_level = &consistency_level;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_get(ColumnOrSuperColumn& _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("get") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_get_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.nfe) {
+ throw result.nfe;
+ }
+ if (result.__isset.ue) {
+ throw result.ue;
+ }
+ if (result.__isset.te) {
+ throw result.te;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get failed: unknown result");
+}
+
+void CassandraClient::get_slice(std::vector<ColumnOrSuperColumn> & _return, const std::string& key, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level)
+{
+ send_get_slice(key, column_parent, predicate, consistency_level);
+ recv_get_slice(_return);
+}
+
+void CassandraClient::send_get_slice(const std::string& key, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("get_slice", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_get_slice_pargs args;
+ args.key = &key;
+ args.column_parent = &column_parent;
+ args.predicate = &predicate;
+ args.consistency_level = &consistency_level;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_get_slice(std::vector<ColumnOrSuperColumn> & _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("get_slice") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_get_slice_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.ue) {
+ throw result.ue;
+ }
+ if (result.__isset.te) {
+ throw result.te;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_slice failed: unknown result");
+}
+
+int32_t CassandraClient::get_count(const std::string& key, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level)
+{
+ send_get_count(key, column_parent, predicate, consistency_level);
+ return recv_get_count();
+}
+
+void CassandraClient::send_get_count(const std::string& key, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("get_count", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_get_count_pargs args;
+ args.key = &key;
+ args.column_parent = &column_parent;
+ args.predicate = &predicate;
+ args.consistency_level = &consistency_level;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+int32_t CassandraClient::recv_get_count()
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("get_count") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ int32_t _return;
+ Cassandra_get_count_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ return _return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.ue) {
+ throw result.ue;
+ }
+ if (result.__isset.te) {
+ throw result.te;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_count failed: unknown result");
+}
+
+void CassandraClient::multiget_slice(std::map<std::string, std::vector<ColumnOrSuperColumn> > & _return, const std::vector<std::string> & keys, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level)
+{
+ send_multiget_slice(keys, column_parent, predicate, consistency_level);
+ recv_multiget_slice(_return);
+}
+
+void CassandraClient::send_multiget_slice(const std::vector<std::string> & keys, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("multiget_slice", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_multiget_slice_pargs args;
+ args.keys = &keys;
+ args.column_parent = &column_parent;
+ args.predicate = &predicate;
+ args.consistency_level = &consistency_level;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_multiget_slice(std::map<std::string, std::vector<ColumnOrSuperColumn> > & _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("multiget_slice") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_multiget_slice_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.ue) {
+ throw result.ue;
+ }
+ if (result.__isset.te) {
+ throw result.te;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "multiget_slice failed: unknown result");
+}
+
+void CassandraClient::multiget_count(std::map<std::string, int32_t> & _return, const std::vector<std::string> & keys, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level)
+{
+ send_multiget_count(keys, column_parent, predicate, consistency_level);
+ recv_multiget_count(_return);
+}
+
+void CassandraClient::send_multiget_count(const std::vector<std::string> & keys, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("multiget_count", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_multiget_count_pargs args;
+ args.keys = &keys;
+ args.column_parent = &column_parent;
+ args.predicate = &predicate;
+ args.consistency_level = &consistency_level;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_multiget_count(std::map<std::string, int32_t> & _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("multiget_count") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_multiget_count_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.ue) {
+ throw result.ue;
+ }
+ if (result.__isset.te) {
+ throw result.te;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "multiget_count failed: unknown result");
+}
+
+void CassandraClient::get_range_slices(std::vector<KeySlice> & _return, const ColumnParent& column_parent, const SlicePredicate& predicate, const KeyRange& range, const ConsistencyLevel::type consistency_level)
+{
+ send_get_range_slices(column_parent, predicate, range, consistency_level);
+ recv_get_range_slices(_return);
+}
+
+void CassandraClient::send_get_range_slices(const ColumnParent& column_parent, const SlicePredicate& predicate, const KeyRange& range, const ConsistencyLevel::type consistency_level)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("get_range_slices", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_get_range_slices_pargs args;
+ args.column_parent = &column_parent;
+ args.predicate = &predicate;
+ args.range = &range;
+ args.consistency_level = &consistency_level;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_get_range_slices(std::vector<KeySlice> & _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("get_range_slices") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_get_range_slices_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.ue) {
+ throw result.ue;
+ }
+ if (result.__isset.te) {
+ throw result.te;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_range_slices failed: unknown result");
+}
+
+void CassandraClient::get_paged_slice(std::vector<KeySlice> & _return, const std::string& column_family, const KeyRange& range, const std::string& start_column, const ConsistencyLevel::type consistency_level)
+{
+ send_get_paged_slice(column_family, range, start_column, consistency_level);
+ recv_get_paged_slice(_return);
+}
+
+void CassandraClient::send_get_paged_slice(const std::string& column_family, const KeyRange& range, const std::string& start_column, const ConsistencyLevel::type consistency_level)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("get_paged_slice", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_get_paged_slice_pargs args;
+ args.column_family = &column_family;
+ args.range = &range;
+ args.start_column = &start_column;
+ args.consistency_level = &consistency_level;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_get_paged_slice(std::vector<KeySlice> & _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("get_paged_slice") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_get_paged_slice_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.ue) {
+ throw result.ue;
+ }
+ if (result.__isset.te) {
+ throw result.te;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_paged_slice failed: unknown result");
+}
+
+void CassandraClient::get_indexed_slices(std::vector<KeySlice> & _return, const ColumnParent& column_parent, const IndexClause& index_clause, const SlicePredicate& column_predicate, const ConsistencyLevel::type consistency_level)
+{
+ send_get_indexed_slices(column_parent, index_clause, column_predicate, consistency_level);
+ recv_get_indexed_slices(_return);
+}
+
+void CassandraClient::send_get_indexed_slices(const ColumnParent& column_parent, const IndexClause& index_clause, const SlicePredicate& column_predicate, const ConsistencyLevel::type consistency_level)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("get_indexed_slices", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_get_indexed_slices_pargs args;
+ args.column_parent = &column_parent;
+ args.index_clause = &index_clause;
+ args.column_predicate = &column_predicate;
+ args.consistency_level = &consistency_level;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_get_indexed_slices(std::vector<KeySlice> & _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("get_indexed_slices") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_get_indexed_slices_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.ue) {
+ throw result.ue;
+ }
+ if (result.__isset.te) {
+ throw result.te;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_indexed_slices failed: unknown result");
+}
+
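+/*
+  The mutation calls (insert, add, remove, remove_counter, batch_mutate,
+  truncate) return void, but recv_* still reads the full presult so that
+  server-declared exceptions propagate; with no success field to check, a
+  clean reply simply returns.
+*/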
+void CassandraClient::insert(const std::string& key, const ColumnParent& column_parent, const Column& column, const ConsistencyLevel::type consistency_level)
+{
+ send_insert(key, column_parent, column, consistency_level);
+ recv_insert();
+}
+
+void CassandraClient::send_insert(const std::string& key, const ColumnParent& column_parent, const Column& column, const ConsistencyLevel::type consistency_level)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("insert", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_insert_pargs args;
+ args.key = &key;
+ args.column_parent = &column_parent;
+ args.column = &column;
+ args.consistency_level = &consistency_level;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_insert()
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("insert") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_insert_presult result;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.ue) {
+ throw result.ue;
+ }
+ if (result.__isset.te) {
+ throw result.te;
+ }
+ return;
+}
+
+void CassandraClient::add(const std::string& key, const ColumnParent& column_parent, const CounterColumn& column, const ConsistencyLevel::type consistency_level)
+{
+ send_add(key, column_parent, column, consistency_level);
+ recv_add();
+}
+
+void CassandraClient::send_add(const std::string& key, const ColumnParent& column_parent, const CounterColumn& column, const ConsistencyLevel::type consistency_level)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("add", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_add_pargs args;
+ args.key = &key;
+ args.column_parent = &column_parent;
+ args.column = &column;
+ args.consistency_level = &consistency_level;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_add()
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("add") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_add_presult result;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.ue) {
+ throw result.ue;
+ }
+ if (result.__isset.te) {
+ throw result.te;
+ }
+ return;
+}
+
+void CassandraClient::remove(const std::string& key, const ColumnPath& column_path, const int64_t timestamp, const ConsistencyLevel::type consistency_level)
+{
+ send_remove(key, column_path, timestamp, consistency_level);
+ recv_remove();
+}
+
+void CassandraClient::send_remove(const std::string& key, const ColumnPath& column_path, const int64_t timestamp, const ConsistencyLevel::type consistency_level)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("remove", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_remove_pargs args;
+ args.key = &key;
+ args.column_path = &column_path;
+ args.timestamp = &timestamp;
+ args.consistency_level = &consistency_level;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_remove()
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("remove") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_remove_presult result;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.ue) {
+ throw result.ue;
+ }
+ if (result.__isset.te) {
+ throw result.te;
+ }
+ return;
+}
+
+void CassandraClient::remove_counter(const std::string& key, const ColumnPath& path, const ConsistencyLevel::type consistency_level)
+{
+ send_remove_counter(key, path, consistency_level);
+ recv_remove_counter();
+}
+
+void CassandraClient::send_remove_counter(const std::string& key, const ColumnPath& path, const ConsistencyLevel::type consistency_level)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("remove_counter", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_remove_counter_pargs args;
+ args.key = &key;
+ args.path = &path;
+ args.consistency_level = &consistency_level;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_remove_counter()
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("remove_counter") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_remove_counter_presult result;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.ue) {
+ throw result.ue;
+ }
+ if (result.__isset.te) {
+ throw result.te;
+ }
+ return;
+}
+
+void CassandraClient::batch_mutate(const std::map<std::string, std::map<std::string, std::vector<Mutation> > > & mutation_map, const ConsistencyLevel::type consistency_level)
+{
+ send_batch_mutate(mutation_map, consistency_level);
+ recv_batch_mutate();
+}
+
+void CassandraClient::send_batch_mutate(const std::map<std::string, std::map<std::string, std::vector<Mutation> > > & mutation_map, const ConsistencyLevel::type consistency_level)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("batch_mutate", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_batch_mutate_pargs args;
+ args.mutation_map = &mutation_map;
+ args.consistency_level = &consistency_level;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_batch_mutate()
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("batch_mutate") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_batch_mutate_presult result;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.ue) {
+ throw result.ue;
+ }
+ if (result.__isset.te) {
+ throw result.te;
+ }
+ return;
+}
+
+void CassandraClient::truncate(const std::string& cfname)
+{
+ send_truncate(cfname);
+ recv_truncate();
+}
+
+void CassandraClient::send_truncate(const std::string& cfname)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("truncate", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_truncate_pargs args;
+ args.cfname = &cfname;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_truncate()
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("truncate") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_truncate_presult result;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.ue) {
+ throw result.ue;
+ }
+ if (result.__isset.te) {
+ throw result.te;
+ }
+ return;
+}
+
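+/*
+  The zero-argument describe_* calls still write an (empty) pargs struct,
+  keeping the writeMessageBegin / args.write / writeMessageEnd framing
+  identical across every method.
+*/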
+void CassandraClient::describe_schema_versions(std::map<std::string, std::vector<std::string> > & _return)
+{
+ send_describe_schema_versions();
+ recv_describe_schema_versions(_return);
+}
+
+void CassandraClient::send_describe_schema_versions()
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("describe_schema_versions", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_describe_schema_versions_pargs args;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_describe_schema_versions(std::map<std::string, std::vector<std::string> > & _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("describe_schema_versions") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_describe_schema_versions_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "describe_schema_versions failed: unknown result");
+}
+
+void CassandraClient::describe_keyspaces(std::vector<KsDef> & _return)
+{
+ send_describe_keyspaces();
+ recv_describe_keyspaces(_return);
+}
+
+void CassandraClient::send_describe_keyspaces()
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("describe_keyspaces", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_describe_keyspaces_pargs args;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_describe_keyspaces(std::vector<KsDef> & _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("describe_keyspaces") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_describe_keyspaces_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "describe_keyspaces failed: unknown result");
+}
+
+void CassandraClient::describe_cluster_name(std::string& _return)
+{
+ send_describe_cluster_name();
+ recv_describe_cluster_name(_return);
+}
+
+void CassandraClient::send_describe_cluster_name()
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("describe_cluster_name", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_describe_cluster_name_pargs args;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_describe_cluster_name(std::string& _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("describe_cluster_name") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_describe_cluster_name_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "describe_cluster_name failed: unknown result");
+}
+
+void CassandraClient::describe_version(std::string& _return)
+{
+ send_describe_version();
+ recv_describe_version(_return);
+}
+
+void CassandraClient::send_describe_version()
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("describe_version", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_describe_version_pargs args;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_describe_version(std::string& _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("describe_version") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_describe_version_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "describe_version failed: unknown result");
+}
+
+void CassandraClient::describe_ring(std::vector<TokenRange> & _return, const std::string& keyspace)
+{
+ send_describe_ring(keyspace);
+ recv_describe_ring(_return);
+}
+
+void CassandraClient::send_describe_ring(const std::string& keyspace)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("describe_ring", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_describe_ring_pargs args;
+ args.keyspace = &keyspace;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_describe_ring(std::vector<TokenRange> & _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("describe_ring") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_describe_ring_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "describe_ring failed: unknown result");
+}
+
+void CassandraClient::describe_token_map(std::map<std::string, std::string> & _return)
+{
+ send_describe_token_map();
+ recv_describe_token_map(_return);
+}
+
+void CassandraClient::send_describe_token_map()
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("describe_token_map", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_describe_token_map_pargs args;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_describe_token_map(std::map<std::string, std::string> & _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("describe_token_map") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_describe_token_map_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "describe_token_map failed: unknown result");
+}
+
+void CassandraClient::describe_partitioner(std::string& _return)
+{
+ send_describe_partitioner();
+ recv_describe_partitioner(_return);
+}
+
+void CassandraClient::send_describe_partitioner()
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("describe_partitioner", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_describe_partitioner_pargs args;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_describe_partitioner(std::string& _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("describe_partitioner") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_describe_partitioner_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "describe_partitioner failed: unknown result");
+}
+
+void CassandraClient::describe_snitch(std::string& _return)
+{
+ send_describe_snitch();
+ recv_describe_snitch(_return);
+}
+
+void CassandraClient::send_describe_snitch()
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("describe_snitch", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_describe_snitch_pargs args;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_describe_snitch(std::string& _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("describe_snitch") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_describe_snitch_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "describe_snitch failed: unknown result");
+}
+
+void CassandraClient::describe_keyspace(KsDef& _return, const std::string& keyspace)
+{
+ send_describe_keyspace(keyspace);
+ recv_describe_keyspace(_return);
+}
+
+void CassandraClient::send_describe_keyspace(const std::string& keyspace)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("describe_keyspace", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_describe_keyspace_pargs args;
+ args.keyspace = &keyspace;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_describe_keyspace(KsDef& _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("describe_keyspace") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_describe_keyspace_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.nfe) {
+ throw result.nfe;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "describe_keyspace failed: unknown result");
+}
+
+void CassandraClient::describe_splits(std::vector<std::string> & _return, const std::string& cfName, const std::string& start_token, const std::string& end_token, const int32_t keys_per_split)
+{
+ send_describe_splits(cfName, start_token, end_token, keys_per_split);
+ recv_describe_splits(_return);
+}
+
+void CassandraClient::send_describe_splits(const std::string& cfName, const std::string& start_token, const std::string& end_token, const int32_t keys_per_split)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("describe_splits", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_describe_splits_pargs args;
+ args.cfName = &cfName;
+ args.start_token = &start_token;
+ args.end_token = &end_token;
+ args.keys_per_split = &keys_per_split;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_describe_splits(std::vector<std::string> & _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("describe_splits") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_describe_splits_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "describe_splits failed: unknown result");
+}
+
+void CassandraClient::system_add_column_family(std::string& _return, const CfDef& cf_def)
+{
+ send_system_add_column_family(cf_def);
+ recv_system_add_column_family(_return);
+}
+
+void CassandraClient::send_system_add_column_family(const CfDef& cf_def)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("system_add_column_family", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_system_add_column_family_pargs args;
+ args.cf_def = &cf_def;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_system_add_column_family(std::string& _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("system_add_column_family") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_system_add_column_family_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.sde) {
+ throw result.sde;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "system_add_column_family failed: unknown result");
+}
+
+void CassandraClient::system_drop_column_family(std::string& _return, const std::string& column_family)
+{
+ send_system_drop_column_family(column_family);
+ recv_system_drop_column_family(_return);
+}
+
+void CassandraClient::send_system_drop_column_family(const std::string& column_family)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("system_drop_column_family", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_system_drop_column_family_pargs args;
+ args.column_family = &column_family;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_system_drop_column_family(std::string& _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("system_drop_column_family") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_system_drop_column_family_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.sde) {
+ throw result.sde;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "system_drop_column_family failed: unknown result");
+}
+
+void CassandraClient::system_add_keyspace(std::string& _return, const KsDef& ks_def)
+{
+ send_system_add_keyspace(ks_def);
+ recv_system_add_keyspace(_return);
+}
+
+void CassandraClient::send_system_add_keyspace(const KsDef& ks_def)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("system_add_keyspace", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_system_add_keyspace_pargs args;
+ args.ks_def = &ks_def;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_system_add_keyspace(std::string& _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("system_add_keyspace") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_system_add_keyspace_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.sde) {
+ throw result.sde;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "system_add_keyspace failed: unknown result");
+}
+
+void CassandraClient::system_drop_keyspace(std::string& _return, const std::string& keyspace)
+{
+ send_system_drop_keyspace(keyspace);
+ recv_system_drop_keyspace(_return);
+}
+
+void CassandraClient::send_system_drop_keyspace(const std::string& keyspace)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("system_drop_keyspace", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_system_drop_keyspace_pargs args;
+ args.keyspace = &keyspace;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_system_drop_keyspace(std::string& _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("system_drop_keyspace") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_system_drop_keyspace_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.sde) {
+ throw result.sde;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "system_drop_keyspace failed: unknown result");
+}
+
+void CassandraClient::system_update_keyspace(std::string& _return, const KsDef& ks_def)
+{
+ send_system_update_keyspace(ks_def);
+ recv_system_update_keyspace(_return);
+}
+
+void CassandraClient::send_system_update_keyspace(const KsDef& ks_def)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("system_update_keyspace", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_system_update_keyspace_pargs args;
+ args.ks_def = &ks_def;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_system_update_keyspace(std::string& _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("system_update_keyspace") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_system_update_keyspace_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.sde) {
+ throw result.sde;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "system_update_keyspace failed: unknown result");
+}
+
+void CassandraClient::system_update_column_family(std::string& _return, const CfDef& cf_def)
+{
+ send_system_update_column_family(cf_def);
+ recv_system_update_column_family(_return);
+}
+
+void CassandraClient::send_system_update_column_family(const CfDef& cf_def)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("system_update_column_family", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_system_update_column_family_pargs args;
+ args.cf_def = &cf_def;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_system_update_column_family(std::string& _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("system_update_column_family") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_system_update_column_family_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.sde) {
+ throw result.sde;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "system_update_column_family failed: unknown result");
+}
+
+void CassandraClient::execute_cql_query(CqlResult& _return, const std::string& query, const Compression::type compression)
+{
+ send_execute_cql_query(query, compression);
+ recv_execute_cql_query(_return);
+}
+
+void CassandraClient::send_execute_cql_query(const std::string& query, const Compression::type compression)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("execute_cql_query", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_execute_cql_query_pargs args;
+ args.query = &query;
+ args.compression = &compression;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_execute_cql_query(CqlResult& _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("execute_cql_query") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_execute_cql_query_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.ue) {
+ throw result.ue;
+ }
+ if (result.__isset.te) {
+ throw result.te;
+ }
+ if (result.__isset.sde) {
+ throw result.sde;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "execute_cql_query failed: unknown result");
+}
+
+void CassandraClient::prepare_cql_query(CqlPreparedResult& _return, const std::string& query, const Compression::type compression)
+{
+ send_prepare_cql_query(query, compression);
+ recv_prepare_cql_query(_return);
+}
+
+void CassandraClient::send_prepare_cql_query(const std::string& query, const Compression::type compression)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("prepare_cql_query", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_prepare_cql_query_pargs args;
+ args.query = &query;
+ args.compression = &compression;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_prepare_cql_query(CqlPreparedResult& _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("prepare_cql_query") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_prepare_cql_query_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "prepare_cql_query failed: unknown result");
+}
+
+void CassandraClient::execute_prepared_cql_query(CqlResult& _return, const int32_t itemId, const std::vector<std::string> & values)
+{
+ send_execute_prepared_cql_query(itemId, values);
+ recv_execute_prepared_cql_query(_return);
+}
+
+void CassandraClient::send_execute_prepared_cql_query(const int32_t itemId, const std::vector<std::string> & values)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("execute_prepared_cql_query", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_execute_prepared_cql_query_pargs args;
+ args.itemId = &itemId;
+ args.values = &values;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_execute_prepared_cql_query(CqlResult& _return)
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("execute_prepared_cql_query") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_execute_prepared_cql_query_presult result;
+ result.success = &_return;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.success) {
+ // _return pointer has now been filled
+ return;
+ }
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ if (result.__isset.ue) {
+ throw result.ue;
+ }
+ if (result.__isset.te) {
+ throw result.te;
+ }
+ if (result.__isset.sde) {
+ throw result.sde;
+ }
+ throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "execute_prepared_cql_query failed: unknown result");
+}
+
+void CassandraClient::set_cql_version(const std::string& version)
+{
+ send_set_cql_version(version);
+ recv_set_cql_version();
+}
+
+void CassandraClient::send_set_cql_version(const std::string& version)
+{
+ int32_t cseqid = 0;
+ oprot_->writeMessageBegin("set_cql_version", ::apache::thrift::protocol::T_CALL, cseqid);
+
+ Cassandra_set_cql_version_pargs args;
+ args.version = &version;
+ args.write(oprot_);
+
+ oprot_->writeMessageEnd();
+ oprot_->getTransport()->writeEnd();
+ oprot_->getTransport()->flush();
+}
+
+void CassandraClient::recv_set_cql_version()
+{
+
+ int32_t rseqid = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+
+ iprot_->readMessageBegin(fname, mtype, rseqid);
+ if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+ ::apache::thrift::TApplicationException x;
+ x.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ throw x;
+ }
+ if (mtype != ::apache::thrift::protocol::T_REPLY) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ if (fname.compare("set_cql_version") != 0) {
+ iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+ }
+ Cassandra_set_cql_version_presult result;
+ result.read(iprot_);
+ iprot_->readMessageEnd();
+ iprot_->getTransport()->readEnd();
+
+ if (result.__isset.ire) {
+ throw result.ire;
+ }
+ return;
+}
+
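+// Server side of the same contract: a Thrift server hands every incoming
+// message to process(), which validates the header and dispatches on the
+// method name. A minimal hosting sketch (MyCassandraHandler, a user class
+// implementing CassandraIf, is assumed here and is not part of this file):
+//
+//   boost::shared_ptr<CassandraIf> handler(new MyCassandraHandler());
+//   boost::shared_ptr<TProcessor> proc(new CassandraProcessor(handler));
+//   TThreadedServer server(proc,
+//       boost::shared_ptr<TServerTransport>(new TServerSocket(9160)),
+//       boost::shared_ptr<TTransportFactory>(new TFramedTransportFactory()),
+//       boost::shared_ptr<TProtocolFactory>(new TBinaryProtocolFactory()));
+//   server.serve();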
+bool CassandraProcessor::process(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> piprot, boost::shared_ptr< ::apache::thrift::protocol::TProtocol> poprot, void* callContext) {
+
+ ::apache::thrift::protocol::TProtocol* iprot = piprot.get();
+ ::apache::thrift::protocol::TProtocol* oprot = poprot.get();
+ std::string fname;
+ ::apache::thrift::protocol::TMessageType mtype;
+ int32_t seqid;
+
+ iprot->readMessageBegin(fname, mtype, seqid);
+
+ if (mtype != ::apache::thrift::protocol::T_CALL && mtype != ::apache::thrift::protocol::T_ONEWAY) {
+ iprot->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot->readMessageEnd();
+ iprot->getTransport()->readEnd();
+ ::apache::thrift::TApplicationException x(::apache::thrift::TApplicationException::INVALID_MESSAGE_TYPE);
+ oprot->writeMessageBegin(fname, ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return true;
+ }
+
+ return process_fn(iprot, oprot, fname, seqid, callContext);
+}
+
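+// processMap_ (filled in the CassandraProcessor constructor) maps each
+// method name to a pointer-to-member handler, so dispatch is a single map
+// lookup; an unknown method name is answered with an UNKNOWN_METHOD
+// exception instead of tearing down the connection.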
+bool CassandraProcessor::process_fn( ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, std::string& fname, int32_t seqid, void* callContext) {
+ std::map<std::string, void (CassandraProcessor::*)(int32_t, ::apache::thrift::protocol::TProtocol*, ::apache::thrift::protocol::TProtocol*, void*)>::iterator pfn;
+ pfn = processMap_.find(fname);
+ if (pfn == processMap_.end()) {
+ iprot->skip(::apache::thrift::protocol::T_STRUCT);
+ iprot->readMessageEnd();
+ iprot->getTransport()->readEnd();
+ ::apache::thrift::TApplicationException x(::apache::thrift::TApplicationException::UNKNOWN_METHOD, "Invalid method name: '"+fname+"'");
+ oprot->writeMessageBegin(fname, ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return true;
+ }
+ (this->*(pfn->second))(seqid, iprot, oprot, callContext);
+ return true;
+}
+
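+// Every process_<name> handler below follows the same skeleton: optional
+// TProcessorEventHandler callbacks around each phase (getContext/preRead/
+// postRead/preWrite/postWrite, with TProcessorContextFreer releasing the
+// context on every exit path), then read the args struct, invoke iface_,
+// fold any exception the IDL declares into the result struct's __isset
+// flags, and serialize the result as a T_REPLY. Anything else caught as
+// std::exception is reported back to the peer as a T_EXCEPTION instead.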
+void CassandraProcessor::process_login(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.login", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.login");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.login");
+ }
+
+ Cassandra_login_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.login", bytes);
+ }
+
+ Cassandra_login_result result;
+ try {
+ iface_->login(args.auth_request);
+ } catch (AuthenticationException &authnx) {
+ result.authnx = authnx;
+ result.__isset.authnx = true;
+ } catch (AuthorizationException &authzx) {
+ result.authzx = authzx;
+ result.__isset.authzx = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.login");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("login", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.login");
+ }
+
+ oprot->writeMessageBegin("login", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.login", bytes);
+ }
+}
+
+void CassandraProcessor::process_set_keyspace(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.set_keyspace", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.set_keyspace");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.set_keyspace");
+ }
+
+ Cassandra_set_keyspace_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.set_keyspace", bytes);
+ }
+
+ Cassandra_set_keyspace_result result;
+ try {
+ iface_->set_keyspace(args.keyspace);
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.set_keyspace");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("set_keyspace", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.set_keyspace");
+ }
+
+ oprot->writeMessageBegin("set_keyspace", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.set_keyspace", bytes);
+ }
+}
+
+void CassandraProcessor::process_get(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.get", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.get");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.get");
+ }
+
+ Cassandra_get_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.get", bytes);
+ }
+
+ Cassandra_get_result result;
+ try {
+ iface_->get(result.success, args.key, args.column_path, args.consistency_level);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (NotFoundException &nfe) {
+ result.nfe = nfe;
+ result.__isset.nfe = true;
+ } catch (UnavailableException &ue) {
+ result.ue = ue;
+ result.__isset.ue = true;
+ } catch (TimedOutException &te) {
+ result.te = te;
+ result.__isset.te = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.get");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("get", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.get");
+ }
+
+ oprot->writeMessageBegin("get", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.get", bytes);
+ }
+}
+
+void CassandraProcessor::process_get_slice(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.get_slice", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.get_slice");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.get_slice");
+ }
+
+ Cassandra_get_slice_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.get_slice", bytes);
+ }
+
+ Cassandra_get_slice_result result;
+ try {
+ iface_->get_slice(result.success, args.key, args.column_parent, args.predicate, args.consistency_level);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (UnavailableException &ue) {
+ result.ue = ue;
+ result.__isset.ue = true;
+ } catch (TimedOutException &te) {
+ result.te = te;
+ result.__isset.te = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.get_slice");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("get_slice", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.get_slice");
+ }
+
+ oprot->writeMessageBegin("get_slice", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.get_slice", bytes);
+ }
+}
+
+void CassandraProcessor::process_get_count(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.get_count", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.get_count");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.get_count");
+ }
+
+ Cassandra_get_count_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.get_count", bytes);
+ }
+
+ Cassandra_get_count_result result;
+ try {
+ result.success = iface_->get_count(args.key, args.column_parent, args.predicate, args.consistency_level);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (UnavailableException &ue) {
+ result.ue = ue;
+ result.__isset.ue = true;
+ } catch (TimedOutException &te) {
+ result.te = te;
+ result.__isset.te = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.get_count");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("get_count", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.get_count");
+ }
+
+ oprot->writeMessageBegin("get_count", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.get_count", bytes);
+ }
+}
+
+void CassandraProcessor::process_multiget_slice(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.multiget_slice", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.multiget_slice");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.multiget_slice");
+ }
+
+ Cassandra_multiget_slice_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.multiget_slice", bytes);
+ }
+
+ Cassandra_multiget_slice_result result;
+ try {
+ iface_->multiget_slice(result.success, args.keys, args.column_parent, args.predicate, args.consistency_level);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (UnavailableException &ue) {
+ result.ue = ue;
+ result.__isset.ue = true;
+ } catch (TimedOutException &te) {
+ result.te = te;
+ result.__isset.te = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.multiget_slice");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("multiget_slice", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.multiget_slice");
+ }
+
+ oprot->writeMessageBegin("multiget_slice", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.multiget_slice", bytes);
+ }
+}
+
+void CassandraProcessor::process_multiget_count(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.multiget_count", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.multiget_count");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.multiget_count");
+ }
+
+ Cassandra_multiget_count_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.multiget_count", bytes);
+ }
+
+ Cassandra_multiget_count_result result;
+ try {
+ iface_->multiget_count(result.success, args.keys, args.column_parent, args.predicate, args.consistency_level);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (UnavailableException &ue) {
+ result.ue = ue;
+ result.__isset.ue = true;
+ } catch (TimedOutException &te) {
+ result.te = te;
+ result.__isset.te = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.multiget_count");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("multiget_count", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.multiget_count");
+ }
+
+ oprot->writeMessageBegin("multiget_count", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.multiget_count", bytes);
+ }
+}
+
+void CassandraProcessor::process_get_range_slices(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.get_range_slices", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.get_range_slices");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.get_range_slices");
+ }
+
+ Cassandra_get_range_slices_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.get_range_slices", bytes);
+ }
+
+ Cassandra_get_range_slices_result result;
+ try {
+ iface_->get_range_slices(result.success, args.column_parent, args.predicate, args.range, args.consistency_level);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (UnavailableException &ue) {
+ result.ue = ue;
+ result.__isset.ue = true;
+ } catch (TimedOutException &te) {
+ result.te = te;
+ result.__isset.te = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.get_range_slices");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("get_range_slices", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.get_range_slices");
+ }
+
+ oprot->writeMessageBegin("get_range_slices", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.get_range_slices", bytes);
+ }
+}
+
+void CassandraProcessor::process_get_paged_slice(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.get_paged_slice", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.get_paged_slice");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.get_paged_slice");
+ }
+
+ Cassandra_get_paged_slice_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.get_paged_slice", bytes);
+ }
+
+ Cassandra_get_paged_slice_result result;
+ try {
+ iface_->get_paged_slice(result.success, args.column_family, args.range, args.start_column, args.consistency_level);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (UnavailableException &ue) {
+ result.ue = ue;
+ result.__isset.ue = true;
+ } catch (TimedOutException &te) {
+ result.te = te;
+ result.__isset.te = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.get_paged_slice");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("get_paged_slice", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.get_paged_slice");
+ }
+
+ oprot->writeMessageBegin("get_paged_slice", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.get_paged_slice", bytes);
+ }
+}
+
+void CassandraProcessor::process_get_indexed_slices(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.get_indexed_slices", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.get_indexed_slices");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.get_indexed_slices");
+ }
+
+ Cassandra_get_indexed_slices_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.get_indexed_slices", bytes);
+ }
+
+ Cassandra_get_indexed_slices_result result;
+ try {
+ iface_->get_indexed_slices(result.success, args.column_parent, args.index_clause, args.column_predicate, args.consistency_level);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (UnavailableException &ue) {
+ result.ue = ue;
+ result.__isset.ue = true;
+ } catch (TimedOutException &te) {
+ result.te = te;
+ result.__isset.te = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.get_indexed_slices");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("get_indexed_slices", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.get_indexed_slices");
+ }
+
+ oprot->writeMessageBegin("get_indexed_slices", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.get_indexed_slices", bytes);
+ }
+}
+
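+// insert, and the mutating RPCs that follow it, return void: on success the
+// result struct is written back empty, which is why these try blocks have
+// no result.__isset.success assignment.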
+void CassandraProcessor::process_insert(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.insert", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.insert");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.insert");
+ }
+
+ Cassandra_insert_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.insert", bytes);
+ }
+
+ Cassandra_insert_result result;
+ try {
+ iface_->insert(args.key, args.column_parent, args.column, args.consistency_level);
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (UnavailableException &ue) {
+ result.ue = ue;
+ result.__isset.ue = true;
+ } catch (TimedOutException &te) {
+ result.te = te;
+ result.__isset.te = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.insert");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("insert", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.insert");
+ }
+
+ oprot->writeMessageBegin("insert", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.insert", bytes);
+ }
+}
+
+void CassandraProcessor::process_add(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.add", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.add");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.add");
+ }
+
+ Cassandra_add_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.add", bytes);
+ }
+
+ Cassandra_add_result result;
+ try {
+ iface_->add(args.key, args.column_parent, args.column, args.consistency_level);
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (UnavailableException &ue) {
+ result.ue = ue;
+ result.__isset.ue = true;
+ } catch (TimedOutException &te) {
+ result.te = te;
+ result.__isset.te = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.add");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("add", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.add");
+ }
+
+ oprot->writeMessageBegin("add", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.add", bytes);
+ }
+}
+
+void CassandraProcessor::process_remove(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.remove", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.remove");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.remove");
+ }
+
+ Cassandra_remove_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.remove", bytes);
+ }
+
+ Cassandra_remove_result result;
+ try {
+ iface_->remove(args.key, args.column_path, args.timestamp, args.consistency_level);
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (UnavailableException &ue) {
+ result.ue = ue;
+ result.__isset.ue = true;
+ } catch (TimedOutException &te) {
+ result.te = te;
+ result.__isset.te = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.remove");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("remove", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.remove");
+ }
+
+ oprot->writeMessageBegin("remove", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.remove", bytes);
+ }
+}
+
+void CassandraProcessor::process_remove_counter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.remove_counter", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.remove_counter");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.remove_counter");
+ }
+
+ Cassandra_remove_counter_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.remove_counter", bytes);
+ }
+
+ Cassandra_remove_counter_result result;
+ try {
+ iface_->remove_counter(args.key, args.path, args.consistency_level);
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (UnavailableException &ue) {
+ result.ue = ue;
+ result.__isset.ue = true;
+ } catch (TimedOutException &te) {
+ result.te = te;
+ result.__isset.te = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.remove_counter");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("remove_counter", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.remove_counter");
+ }
+
+ oprot->writeMessageBegin("remove_counter", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.remove_counter", bytes);
+ }
+}
+
+void CassandraProcessor::process_batch_mutate(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.batch_mutate", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.batch_mutate");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.batch_mutate");
+ }
+
+ Cassandra_batch_mutate_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.batch_mutate", bytes);
+ }
+
+ Cassandra_batch_mutate_result result;
+ try {
+ iface_->batch_mutate(args.mutation_map, args.consistency_level);
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (UnavailableException &ue) {
+ result.ue = ue;
+ result.__isset.ue = true;
+ } catch (TimedOutException &te) {
+ result.te = te;
+ result.__isset.te = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.batch_mutate");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("batch_mutate", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.batch_mutate");
+ }
+
+ oprot->writeMessageBegin("batch_mutate", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.batch_mutate", bytes);
+ }
+}
+
+void CassandraProcessor::process_truncate(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.truncate", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.truncate");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.truncate");
+ }
+
+ Cassandra_truncate_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.truncate", bytes);
+ }
+
+ Cassandra_truncate_result result;
+ try {
+ iface_->truncate(args.cfname);
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (UnavailableException &ue) {
+ result.ue = ue;
+ result.__isset.ue = true;
+ } catch (TimedOutException &te) {
+ result.te = te;
+ result.__isset.te = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.truncate");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("truncate", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.truncate");
+ }
+
+ oprot->writeMessageBegin("truncate", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.truncate", bytes);
+ }
+}
+
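+// The describe_* handlers below differ only in which service exceptions the
+// IDL declares: most map InvalidRequestException, describe_keyspace also
+// maps NotFoundException, and the purely informational calls (cluster name,
+// version, partitioner, snitch) catch nothing beyond std::exception.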
+void CassandraProcessor::process_describe_schema_versions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.describe_schema_versions", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.describe_schema_versions");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.describe_schema_versions");
+ }
+
+ Cassandra_describe_schema_versions_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.describe_schema_versions", bytes);
+ }
+
+ Cassandra_describe_schema_versions_result result;
+ try {
+ iface_->describe_schema_versions(result.success);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.describe_schema_versions");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("describe_schema_versions", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.describe_schema_versions");
+ }
+
+ oprot->writeMessageBegin("describe_schema_versions", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.describe_schema_versions", bytes);
+ }
+}
+
+void CassandraProcessor::process_describe_keyspaces(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.describe_keyspaces", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.describe_keyspaces");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.describe_keyspaces");
+ }
+
+ Cassandra_describe_keyspaces_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.describe_keyspaces", bytes);
+ }
+
+ Cassandra_describe_keyspaces_result result;
+ try {
+ iface_->describe_keyspaces(result.success);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.describe_keyspaces");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("describe_keyspaces", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.describe_keyspaces");
+ }
+
+ oprot->writeMessageBegin("describe_keyspaces", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.describe_keyspaces", bytes);
+ }
+}
+
+void CassandraProcessor::process_describe_cluster_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.describe_cluster_name", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.describe_cluster_name");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.describe_cluster_name");
+ }
+
+ Cassandra_describe_cluster_name_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.describe_cluster_name", bytes);
+ }
+
+ Cassandra_describe_cluster_name_result result;
+ try {
+ iface_->describe_cluster_name(result.success);
+ result.__isset.success = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.describe_cluster_name");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("describe_cluster_name", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.describe_cluster_name");
+ }
+
+ oprot->writeMessageBegin("describe_cluster_name", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.describe_cluster_name", bytes);
+ }
+}
+
+void CassandraProcessor::process_describe_version(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.describe_version", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.describe_version");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.describe_version");
+ }
+
+ Cassandra_describe_version_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.describe_version", bytes);
+ }
+
+ Cassandra_describe_version_result result;
+ try {
+ iface_->describe_version(result.success);
+ result.__isset.success = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.describe_version");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("describe_version", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.describe_version");
+ }
+
+ oprot->writeMessageBegin("describe_version", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.describe_version", bytes);
+ }
+}
+
+void CassandraProcessor::process_describe_ring(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.describe_ring", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.describe_ring");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.describe_ring");
+ }
+
+ Cassandra_describe_ring_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.describe_ring", bytes);
+ }
+
+ Cassandra_describe_ring_result result;
+ try {
+ iface_->describe_ring(result.success, args.keyspace);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.describe_ring");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("describe_ring", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.describe_ring");
+ }
+
+ oprot->writeMessageBegin("describe_ring", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.describe_ring", bytes);
+ }
+}
+
+void CassandraProcessor::process_describe_token_map(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.describe_token_map", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.describe_token_map");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.describe_token_map");
+ }
+
+ Cassandra_describe_token_map_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.describe_token_map", bytes);
+ }
+
+ Cassandra_describe_token_map_result result;
+ try {
+ iface_->describe_token_map(result.success);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.describe_token_map");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("describe_token_map", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.describe_token_map");
+ }
+
+ oprot->writeMessageBegin("describe_token_map", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.describe_token_map", bytes);
+ }
+}
+
+void CassandraProcessor::process_describe_partitioner(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.describe_partitioner", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.describe_partitioner");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.describe_partitioner");
+ }
+
+ Cassandra_describe_partitioner_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.describe_partitioner", bytes);
+ }
+
+ Cassandra_describe_partitioner_result result;
+ try {
+ iface_->describe_partitioner(result.success);
+ result.__isset.success = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.describe_partitioner");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("describe_partitioner", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.describe_partitioner");
+ }
+
+ oprot->writeMessageBegin("describe_partitioner", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.describe_partitioner", bytes);
+ }
+}
+
+void CassandraProcessor::process_describe_snitch(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.describe_snitch", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.describe_snitch");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.describe_snitch");
+ }
+
+ Cassandra_describe_snitch_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.describe_snitch", bytes);
+ }
+
+ Cassandra_describe_snitch_result result;
+ try {
+ iface_->describe_snitch(result.success);
+ result.__isset.success = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.describe_snitch");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("describe_snitch", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.describe_snitch");
+ }
+
+ oprot->writeMessageBegin("describe_snitch", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.describe_snitch", bytes);
+ }
+}
+
+void CassandraProcessor::process_describe_keyspace(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.describe_keyspace", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.describe_keyspace");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.describe_keyspace");
+ }
+
+ Cassandra_describe_keyspace_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.describe_keyspace", bytes);
+ }
+
+ Cassandra_describe_keyspace_result result;
+ try {
+ iface_->describe_keyspace(result.success, args.keyspace);
+ result.__isset.success = true;
+ } catch (NotFoundException &nfe) {
+ result.nfe = nfe;
+ result.__isset.nfe = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.describe_keyspace");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("describe_keyspace", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.describe_keyspace");
+ }
+
+ oprot->writeMessageBegin("describe_keyspace", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.describe_keyspace", bytes);
+ }
+}
+
+void CassandraProcessor::process_describe_splits(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.describe_splits", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.describe_splits");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.describe_splits");
+ }
+
+ Cassandra_describe_splits_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.describe_splits", bytes);
+ }
+
+ Cassandra_describe_splits_result result;
+ try {
+ iface_->describe_splits(result.success, args.cfName, args.start_token, args.end_token, args.keys_per_split);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.describe_splits");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("describe_splits", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.describe_splits");
+ }
+
+ oprot->writeMessageBegin("describe_splits", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.describe_splits", bytes);
+ }
+}
+
+void CassandraProcessor::process_system_add_column_family(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.system_add_column_family", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.system_add_column_family");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.system_add_column_family");
+ }
+
+ Cassandra_system_add_column_family_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.system_add_column_family", bytes);
+ }
+
+ Cassandra_system_add_column_family_result result;
+ try {
+ iface_->system_add_column_family(result.success, args.cf_def);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (SchemaDisagreementException &sde) {
+ result.sde = sde;
+ result.__isset.sde = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.system_add_column_family");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("system_add_column_family", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.system_add_column_family");
+ }
+
+ oprot->writeMessageBegin("system_add_column_family", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.system_add_column_family", bytes);
+ }
+}
+
+void CassandraProcessor::process_system_drop_column_family(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.system_drop_column_family", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.system_drop_column_family");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.system_drop_column_family");
+ }
+
+ Cassandra_system_drop_column_family_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.system_drop_column_family", bytes);
+ }
+
+ Cassandra_system_drop_column_family_result result;
+ try {
+ iface_->system_drop_column_family(result.success, args.column_family);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (SchemaDisagreementException &sde) {
+ result.sde = sde;
+ result.__isset.sde = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.system_drop_column_family");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("system_drop_column_family", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.system_drop_column_family");
+ }
+
+ oprot->writeMessageBegin("system_drop_column_family", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.system_drop_column_family", bytes);
+ }
+}
+
+void CassandraProcessor::process_system_add_keyspace(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.system_add_keyspace", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.system_add_keyspace");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.system_add_keyspace");
+ }
+
+ Cassandra_system_add_keyspace_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.system_add_keyspace", bytes);
+ }
+
+ Cassandra_system_add_keyspace_result result;
+ try {
+ iface_->system_add_keyspace(result.success, args.ks_def);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (SchemaDisagreementException &sde) {
+ result.sde = sde;
+ result.__isset.sde = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.system_add_keyspace");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("system_add_keyspace", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.system_add_keyspace");
+ }
+
+ oprot->writeMessageBegin("system_add_keyspace", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.system_add_keyspace", bytes);
+ }
+}
+
+void CassandraProcessor::process_system_drop_keyspace(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.system_drop_keyspace", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.system_drop_keyspace");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.system_drop_keyspace");
+ }
+
+ Cassandra_system_drop_keyspace_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.system_drop_keyspace", bytes);
+ }
+
+ Cassandra_system_drop_keyspace_result result;
+ try {
+ iface_->system_drop_keyspace(result.success, args.keyspace);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (SchemaDisagreementException &sde) {
+ result.sde = sde;
+ result.__isset.sde = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.system_drop_keyspace");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("system_drop_keyspace", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.system_drop_keyspace");
+ }
+
+ oprot->writeMessageBegin("system_drop_keyspace", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.system_drop_keyspace", bytes);
+ }
+}
+
+void CassandraProcessor::process_system_update_keyspace(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.system_update_keyspace", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.system_update_keyspace");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.system_update_keyspace");
+ }
+
+ Cassandra_system_update_keyspace_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.system_update_keyspace", bytes);
+ }
+
+ Cassandra_system_update_keyspace_result result;
+ try {
+ iface_->system_update_keyspace(result.success, args.ks_def);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (SchemaDisagreementException &sde) {
+ result.sde = sde;
+ result.__isset.sde = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.system_update_keyspace");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("system_update_keyspace", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.system_update_keyspace");
+ }
+
+ oprot->writeMessageBegin("system_update_keyspace", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.system_update_keyspace", bytes);
+ }
+}
+
+void CassandraProcessor::process_system_update_column_family(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.system_update_column_family", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.system_update_column_family");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.system_update_column_family");
+ }
+
+ Cassandra_system_update_column_family_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.system_update_column_family", bytes);
+ }
+
+ Cassandra_system_update_column_family_result result;
+ try {
+ iface_->system_update_column_family(result.success, args.cf_def);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (SchemaDisagreementException &sde) {
+ result.sde = sde;
+ result.__isset.sde = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.system_update_column_family");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("system_update_column_family", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.system_update_column_family");
+ }
+
+ oprot->writeMessageBegin("system_update_column_family", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.system_update_column_family", bytes);
+ }
+}
+
+void CassandraProcessor::process_execute_cql_query(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.execute_cql_query", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.execute_cql_query");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.execute_cql_query");
+ }
+
+ Cassandra_execute_cql_query_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.execute_cql_query", bytes);
+ }
+
+ Cassandra_execute_cql_query_result result;
+ try {
+ iface_->execute_cql_query(result.success, args.query, args.compression);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (UnavailableException &ue) {
+ result.ue = ue;
+ result.__isset.ue = true;
+ } catch (TimedOutException &te) {
+ result.te = te;
+ result.__isset.te = true;
+ } catch (SchemaDisagreementException &sde) {
+ result.sde = sde;
+ result.__isset.sde = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.execute_cql_query");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("execute_cql_query", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.execute_cql_query");
+ }
+
+ oprot->writeMessageBegin("execute_cql_query", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.execute_cql_query", bytes);
+ }
+}
+
+void CassandraProcessor::process_prepare_cql_query(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.prepare_cql_query", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.prepare_cql_query");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.prepare_cql_query");
+ }
+
+ Cassandra_prepare_cql_query_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.prepare_cql_query", bytes);
+ }
+
+ Cassandra_prepare_cql_query_result result;
+ try {
+ iface_->prepare_cql_query(result.success, args.query, args.compression);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.prepare_cql_query");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("prepare_cql_query", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.prepare_cql_query");
+ }
+
+ oprot->writeMessageBegin("prepare_cql_query", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.prepare_cql_query", bytes);
+ }
+}
+
+void CassandraProcessor::process_execute_prepared_cql_query(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.execute_prepared_cql_query", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.execute_prepared_cql_query");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.execute_prepared_cql_query");
+ }
+
+ Cassandra_execute_prepared_cql_query_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.execute_prepared_cql_query", bytes);
+ }
+
+ Cassandra_execute_prepared_cql_query_result result;
+ try {
+ iface_->execute_prepared_cql_query(result.success, args.itemId, args.values);
+ result.__isset.success = true;
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (UnavailableException &ue) {
+ result.ue = ue;
+ result.__isset.ue = true;
+ } catch (TimedOutException &te) {
+ result.te = te;
+ result.__isset.te = true;
+ } catch (SchemaDisagreementException &sde) {
+ result.sde = sde;
+ result.__isset.sde = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.execute_prepared_cql_query");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("execute_prepared_cql_query", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.execute_prepared_cql_query");
+ }
+
+ oprot->writeMessageBegin("execute_prepared_cql_query", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.execute_prepared_cql_query", bytes);
+ }
+}
+
+void CassandraProcessor::process_set_cql_version(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext)
+{
+ void* ctx = NULL;
+ if (this->eventHandler_.get() != NULL) {
+ ctx = this->eventHandler_->getContext("Cassandra.set_cql_version", callContext);
+ }
+ ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "Cassandra.set_cql_version");
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preRead(ctx, "Cassandra.set_cql_version");
+ }
+
+ Cassandra_set_cql_version_args args;
+ args.read(iprot);
+ iprot->readMessageEnd();
+ uint32_t bytes = iprot->getTransport()->readEnd();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postRead(ctx, "Cassandra.set_cql_version", bytes);
+ }
+
+ Cassandra_set_cql_version_result result;
+ try {
+ iface_->set_cql_version(args.version);
+ } catch (InvalidRequestException &ire) {
+ result.ire = ire;
+ result.__isset.ire = true;
+ } catch (const std::exception& e) {
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->handlerError(ctx, "Cassandra.set_cql_version");
+ }
+
+ ::apache::thrift::TApplicationException x(e.what());
+ oprot->writeMessageBegin("set_cql_version", ::apache::thrift::protocol::T_EXCEPTION, seqid);
+ x.write(oprot);
+ oprot->writeMessageEnd();
+ oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+ return;
+ }
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->preWrite(ctx, "Cassandra.set_cql_version");
+ }
+
+ oprot->writeMessageBegin("set_cql_version", ::apache::thrift::protocol::T_REPLY, seqid);
+ result.write(oprot);
+ oprot->writeMessageEnd();
+ bytes = oprot->getTransport()->writeEnd();
+ oprot->getTransport()->flush();
+
+ if (this->eventHandler_.get() != NULL) {
+ this->eventHandler_->postWrite(ctx, "Cassandra.set_cql_version", bytes);
+ }
+}
+
+::boost::shared_ptr< ::apache::thrift::TProcessor > CassandraProcessorFactory::getProcessor(const ::apache::thrift::TConnectionInfo& connInfo) {
+ ::apache::thrift::ReleaseHandler< CassandraIfFactory > cleanup(handlerFactory_);
+ ::boost::shared_ptr< CassandraIf > handler(handlerFactory_->getHandler(connInfo), cleanup);
+ ::boost::shared_ptr< ::apache::thrift::TProcessor > processor(new CassandraProcessor(handler));
+ return processor;
+}
+}}} // namespace
+
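Every process_*() method in the generated processor above follows the same read/dispatch/write skeleton: decode the call's args struct from the input protocol, invoke the user handler through the CassandraIf interface, store any declared exception (ire, ue, te, sde) in the matching field of the result struct so it travels back inside an ordinary T_REPLY, and reserve T_EXCEPTION for undeclared std::exception failures, after which the method returns without writing a reply body. Below is a minimal standalone sketch of that control flow; Protocol, Handler and InvalidRequest are hypothetical stand-ins for illustration only, not the real Thrift API or anything added by this patch.

// A minimal sketch of the generated dispatch skeleton (assumed names:
// Protocol, Handler, InvalidRequest -- not the real Thrift types).
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>

// Stand-in for TProtocol plus its transport: just prints what would be sent.
struct Protocol {
  void writeReply(const std::string& method, const std::string& payload) {
    std::cout << "T_REPLY " << method << ": " << payload << "\n";
  }
  void writeException(const std::string& method, const std::string& what) {
    std::cout << "T_EXCEPTION " << method << ": " << what << "\n";
  }
};

// Stand-in for a declared service exception such as InvalidRequestException.
struct InvalidRequest : std::runtime_error {
  using std::runtime_error::runtime_error;
};

// Stand-in for the CassandraIf handler implementation.
struct Handler {
  void set_keyspace(const std::string& ks) {
    if (ks.empty()) throw InvalidRequest("empty keyspace");
  }
};

// The shape of process_set_keyspace(): call the handler, send declared
// exceptions inside a normal reply, undeclared ones as T_EXCEPTION.
void process_set_keyspace(Handler& h, Protocol& prot, const std::string& ks) {
  try {
    h.set_keyspace(ks);
    prot.writeReply("set_keyspace", "ok");
  } catch (const InvalidRequest& ire) {
    // Declared exception: the generated code stores it in the result
    // struct's ire field, so it still goes out as a T_REPLY message
    // that the client-side stub decodes and rethrows.
    prot.writeReply("set_keyspace", std::string("ire: ") + ire.what());
  } catch (const std::exception& e) {
    // Undeclared failure: wrapped in TApplicationException, written as
    // T_EXCEPTION, and the processor returns early with no reply body.
    prot.writeException("set_keyspace", e.what());
  }
}

int main() {
  Handler h;
  Protocol p;
  process_set_keyspace(h, p, "mykeyspace");  // exercises the T_REPLY path
  process_set_keyspace(h, p, "");            // exercises the declared-exception path
  return 0;
}

The design point mirrored here is worth noting when reading the hunks above: declared exceptions are ordinary reply payload (result.__isset.* flags select which field was populated), so clients can distinguish an expected InvalidRequestException from a transport- or handler-level failure, which alone takes the T_EXCEPTION path.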
diff --git a/storage/cassandra/gen-cpp/Cassandra.h b/storage/cassandra/gen-cpp/Cassandra.h
new file mode 100644
index 00000000000..2040cc63aa2
--- /dev/null
+++ b/storage/cassandra/gen-cpp/Cassandra.h
@@ -0,0 +1,5466 @@
+/**
+ * Autogenerated by Thrift Compiler (0.8.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+#ifndef Cassandra_H
+#define Cassandra_H
+
+#include <TProcessor.h>
+#include "cassandra_types.h"
+
+namespace org { namespace apache { namespace cassandra {
+
+class CassandraIf {
+ public:
+ virtual ~CassandraIf() {}
+ virtual void login(const AuthenticationRequest& auth_request) = 0;
+ virtual void set_keyspace(const std::string& keyspace) = 0;
+ virtual void get(ColumnOrSuperColumn& _return, const std::string& key, const ColumnPath& column_path, const ConsistencyLevel::type consistency_level) = 0;
+ virtual void get_slice(std::vector<ColumnOrSuperColumn> & _return, const std::string& key, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level) = 0;
+ virtual int32_t get_count(const std::string& key, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level) = 0;
+ virtual void multiget_slice(std::map<std::string, std::vector<ColumnOrSuperColumn> > & _return, const std::vector<std::string> & keys, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level) = 0;
+ virtual void multiget_count(std::map<std::string, int32_t> & _return, const std::vector<std::string> & keys, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level) = 0;
+ virtual void get_range_slices(std::vector<KeySlice> & _return, const ColumnParent& column_parent, const SlicePredicate& predicate, const KeyRange& range, const ConsistencyLevel::type consistency_level) = 0;
+ virtual void get_paged_slice(std::vector<KeySlice> & _return, const std::string& column_family, const KeyRange& range, const std::string& start_column, const ConsistencyLevel::type consistency_level) = 0;
+ virtual void get_indexed_slices(std::vector<KeySlice> & _return, const ColumnParent& column_parent, const IndexClause& index_clause, const SlicePredicate& column_predicate, const ConsistencyLevel::type consistency_level) = 0;
+ virtual void insert(const std::string& key, const ColumnParent& column_parent, const Column& column, const ConsistencyLevel::type consistency_level) = 0;
+ virtual void add(const std::string& key, const ColumnParent& column_parent, const CounterColumn& column, const ConsistencyLevel::type consistency_level) = 0;
+ virtual void remove(const std::string& key, const ColumnPath& column_path, const int64_t timestamp, const ConsistencyLevel::type consistency_level) = 0;
+ virtual void remove_counter(const std::string& key, const ColumnPath& path, const ConsistencyLevel::type consistency_level) = 0;
+ virtual void batch_mutate(const std::map<std::string, std::map<std::string, std::vector<Mutation> > > & mutation_map, const ConsistencyLevel::type consistency_level) = 0;
+ virtual void truncate(const std::string& cfname) = 0;
+ virtual void describe_schema_versions(std::map<std::string, std::vector<std::string> > & _return) = 0;
+ virtual void describe_keyspaces(std::vector<KsDef> & _return) = 0;
+ virtual void describe_cluster_name(std::string& _return) = 0;
+ virtual void describe_version(std::string& _return) = 0;
+ virtual void describe_ring(std::vector<TokenRange> & _return, const std::string& keyspace) = 0;
+ virtual void describe_token_map(std::map<std::string, std::string> & _return) = 0;
+ virtual void describe_partitioner(std::string& _return) = 0;
+ virtual void describe_snitch(std::string& _return) = 0;
+ virtual void describe_keyspace(KsDef& _return, const std::string& keyspace) = 0;
+ virtual void describe_splits(std::vector<std::string> & _return, const std::string& cfName, const std::string& start_token, const std::string& end_token, const int32_t keys_per_split) = 0;
+ virtual void system_add_column_family(std::string& _return, const CfDef& cf_def) = 0;
+ virtual void system_drop_column_family(std::string& _return, const std::string& column_family) = 0;
+ virtual void system_add_keyspace(std::string& _return, const KsDef& ks_def) = 0;
+ virtual void system_drop_keyspace(std::string& _return, const std::string& keyspace) = 0;
+ virtual void system_update_keyspace(std::string& _return, const KsDef& ks_def) = 0;
+ virtual void system_update_column_family(std::string& _return, const CfDef& cf_def) = 0;
+ virtual void execute_cql_query(CqlResult& _return, const std::string& query, const Compression::type compression) = 0;
+ virtual void prepare_cql_query(CqlPreparedResult& _return, const std::string& query, const Compression::type compression) = 0;
+ virtual void execute_prepared_cql_query(CqlResult& _return, const int32_t itemId, const std::vector<std::string> & values) = 0;
+ virtual void set_cql_version(const std::string& version) = 0;
+};
+
+class CassandraIfFactory {
+ public:
+ typedef CassandraIf Handler;
+
+ virtual ~CassandraIfFactory() {}
+
+ virtual CassandraIf* getHandler(const ::apache::thrift::TConnectionInfo& connInfo) = 0;
+ virtual void releaseHandler(CassandraIf* /* handler */) = 0;
+};
+
+class CassandraIfSingletonFactory : virtual public CassandraIfFactory {
+ public:
+ CassandraIfSingletonFactory(const boost::shared_ptr<CassandraIf>& iface) : iface_(iface) {}
+ virtual ~CassandraIfSingletonFactory() {}
+
+ virtual CassandraIf* getHandler(const ::apache::thrift::TConnectionInfo&) {
+ return iface_.get();
+ }
+ virtual void releaseHandler(CassandraIf* /* handler */) {}
+
+ protected:
+ boost::shared_ptr<CassandraIf> iface_;
+};
+
+class CassandraNull : virtual public CassandraIf {
+ public:
+ virtual ~CassandraNull() {}
+ void login(const AuthenticationRequest& /* auth_request */) {
+ return;
+ }
+ void set_keyspace(const std::string& /* keyspace */) {
+ return;
+ }
+ void get(ColumnOrSuperColumn& /* _return */, const std::string& /* key */, const ColumnPath& /* column_path */, const ConsistencyLevel::type /* consistency_level */) {
+ return;
+ }
+ void get_slice(std::vector<ColumnOrSuperColumn> & /* _return */, const std::string& /* key */, const ColumnParent& /* column_parent */, const SlicePredicate& /* predicate */, const ConsistencyLevel::type /* consistency_level */) {
+ return;
+ }
+ int32_t get_count(const std::string& /* key */, const ColumnParent& /* column_parent */, const SlicePredicate& /* predicate */, const ConsistencyLevel::type /* consistency_level */) {
+ int32_t _return = 0;
+ return _return;
+ }
+ void multiget_slice(std::map<std::string, std::vector<ColumnOrSuperColumn> > & /* _return */, const std::vector<std::string> & /* keys */, const ColumnParent& /* column_parent */, const SlicePredicate& /* predicate */, const ConsistencyLevel::type /* consistency_level */) {
+ return;
+ }
+ void multiget_count(std::map<std::string, int32_t> & /* _return */, const std::vector<std::string> & /* keys */, const ColumnParent& /* column_parent */, const SlicePredicate& /* predicate */, const ConsistencyLevel::type /* consistency_level */) {
+ return;
+ }
+ void get_range_slices(std::vector<KeySlice> & /* _return */, const ColumnParent& /* column_parent */, const SlicePredicate& /* predicate */, const KeyRange& /* range */, const ConsistencyLevel::type /* consistency_level */) {
+ return;
+ }
+ void get_paged_slice(std::vector<KeySlice> & /* _return */, const std::string& /* column_family */, const KeyRange& /* range */, const std::string& /* start_column */, const ConsistencyLevel::type /* consistency_level */) {
+ return;
+ }
+ void get_indexed_slices(std::vector<KeySlice> & /* _return */, const ColumnParent& /* column_parent */, const IndexClause& /* index_clause */, const SlicePredicate& /* column_predicate */, const ConsistencyLevel::type /* consistency_level */) {
+ return;
+ }
+ void insert(const std::string& /* key */, const ColumnParent& /* column_parent */, const Column& /* column */, const ConsistencyLevel::type /* consistency_level */) {
+ return;
+ }
+ void add(const std::string& /* key */, const ColumnParent& /* column_parent */, const CounterColumn& /* column */, const ConsistencyLevel::type /* consistency_level */) {
+ return;
+ }
+ void remove(const std::string& /* key */, const ColumnPath& /* column_path */, const int64_t /* timestamp */, const ConsistencyLevel::type /* consistency_level */) {
+ return;
+ }
+ void remove_counter(const std::string& /* key */, const ColumnPath& /* path */, const ConsistencyLevel::type /* consistency_level */) {
+ return;
+ }
+ void batch_mutate(const std::map<std::string, std::map<std::string, std::vector<Mutation> > > & /* mutation_map */, const ConsistencyLevel::type /* consistency_level */) {
+ return;
+ }
+ void truncate(const std::string& /* cfname */) {
+ return;
+ }
+ void describe_schema_versions(std::map<std::string, std::vector<std::string> > & /* _return */) {
+ return;
+ }
+ void describe_keyspaces(std::vector<KsDef> & /* _return */) {
+ return;
+ }
+ void describe_cluster_name(std::string& /* _return */) {
+ return;
+ }
+ void describe_version(std::string& /* _return */) {
+ return;
+ }
+ void describe_ring(std::vector<TokenRange> & /* _return */, const std::string& /* keyspace */) {
+ return;
+ }
+ void describe_token_map(std::map<std::string, std::string> & /* _return */) {
+ return;
+ }
+ void describe_partitioner(std::string& /* _return */) {
+ return;
+ }
+ void describe_snitch(std::string& /* _return */) {
+ return;
+ }
+ void describe_keyspace(KsDef& /* _return */, const std::string& /* keyspace */) {
+ return;
+ }
+ void describe_splits(std::vector<std::string> & /* _return */, const std::string& /* cfName */, const std::string& /* start_token */, const std::string& /* end_token */, const int32_t /* keys_per_split */) {
+ return;
+ }
+ void system_add_column_family(std::string& /* _return */, const CfDef& /* cf_def */) {
+ return;
+ }
+ void system_drop_column_family(std::string& /* _return */, const std::string& /* column_family */) {
+ return;
+ }
+ void system_add_keyspace(std::string& /* _return */, const KsDef& /* ks_def */) {
+ return;
+ }
+ void system_drop_keyspace(std::string& /* _return */, const std::string& /* keyspace */) {
+ return;
+ }
+ void system_update_keyspace(std::string& /* _return */, const KsDef& /* ks_def */) {
+ return;
+ }
+ void system_update_column_family(std::string& /* _return */, const CfDef& /* cf_def */) {
+ return;
+ }
+ void execute_cql_query(CqlResult& /* _return */, const std::string& /* query */, const Compression::type /* compression */) {
+ return;
+ }
+ void prepare_cql_query(CqlPreparedResult& /* _return */, const std::string& /* query */, const Compression::type /* compression */) {
+ return;
+ }
+ void execute_prepared_cql_query(CqlResult& /* _return */, const int32_t /* itemId */, const std::vector<std::string> & /* values */) {
+ return;
+ }
+ void set_cql_version(const std::string& /* version */) {
+ return;
+ }
+};
+
+
+class Cassandra_login_args {
+ public:
+
+ Cassandra_login_args() {
+ }
+
+ virtual ~Cassandra_login_args() throw() {}
+
+ AuthenticationRequest auth_request;
+
+ void __set_auth_request(const AuthenticationRequest& val) {
+ auth_request = val;
+ }
+
+ bool operator == (const Cassandra_login_args & rhs) const
+ {
+ if (!(auth_request == rhs.auth_request))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_login_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_login_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_login_pargs {
+ public:
+
+
+ virtual ~Cassandra_login_pargs() throw() {}
+
+ const AuthenticationRequest* auth_request;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_login_result__isset {
+ _Cassandra_login_result__isset() : authnx(false), authzx(false) {}
+ bool authnx;
+ bool authzx;
+} _Cassandra_login_result__isset;
+
+class Cassandra_login_result {
+ public:
+
+ Cassandra_login_result() {
+ }
+
+ virtual ~Cassandra_login_result() throw() {}
+
+ AuthenticationException authnx;
+ AuthorizationException authzx;
+
+ _Cassandra_login_result__isset __isset;
+
+ void __set_authnx(const AuthenticationException& val) {
+ authnx = val;
+ }
+
+ void __set_authzx(const AuthorizationException& val) {
+ authzx = val;
+ }
+
+ bool operator == (const Cassandra_login_result & rhs) const
+ {
+ if (!(authnx == rhs.authnx))
+ return false;
+ if (!(authzx == rhs.authzx))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_login_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_login_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_login_presult__isset {
+ _Cassandra_login_presult__isset() : authnx(false), authzx(false) {}
+ bool authnx;
+ bool authzx;
+} _Cassandra_login_presult__isset;
+
+class Cassandra_login_presult {
+ public:
+
+
+ virtual ~Cassandra_login_presult() throw() {}
+
+ AuthenticationException authnx;
+ AuthorizationException authzx;
+
+ _Cassandra_login_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_set_keyspace_args {
+ public:
+
+ Cassandra_set_keyspace_args() : keyspace("") {
+ }
+
+ virtual ~Cassandra_set_keyspace_args() throw() {}
+
+ std::string keyspace;
+
+ void __set_keyspace(const std::string& val) {
+ keyspace = val;
+ }
+
+ bool operator == (const Cassandra_set_keyspace_args & rhs) const
+ {
+ if (!(keyspace == rhs.keyspace))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_set_keyspace_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_set_keyspace_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_set_keyspace_pargs {
+ public:
+
+
+ virtual ~Cassandra_set_keyspace_pargs() throw() {}
+
+ const std::string* keyspace;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_set_keyspace_result__isset {
+ _Cassandra_set_keyspace_result__isset() : ire(false) {}
+ bool ire;
+} _Cassandra_set_keyspace_result__isset;
+
+class Cassandra_set_keyspace_result {
+ public:
+
+ Cassandra_set_keyspace_result() {
+ }
+
+ virtual ~Cassandra_set_keyspace_result() throw() {}
+
+ InvalidRequestException ire;
+
+ _Cassandra_set_keyspace_result__isset __isset;
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ bool operator == (const Cassandra_set_keyspace_result & rhs) const
+ {
+ if (!(ire == rhs.ire))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_set_keyspace_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_set_keyspace_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_set_keyspace_presult__isset {
+ _Cassandra_set_keyspace_presult__isset() : ire(false) {}
+ bool ire;
+} _Cassandra_set_keyspace_presult__isset;
+
+class Cassandra_set_keyspace_presult {
+ public:
+
+
+ virtual ~Cassandra_set_keyspace_presult() throw() {}
+
+ InvalidRequestException ire;
+
+ _Cassandra_set_keyspace_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_get_args {
+ public:
+
+ Cassandra_get_args() : key(""), consistency_level((ConsistencyLevel::type)1) {
+ consistency_level = (ConsistencyLevel::type)1;
+
+ }
+
+ virtual ~Cassandra_get_args() throw() {}
+
+ std::string key;
+ ColumnPath column_path;
+ ConsistencyLevel::type consistency_level;
+
+ void __set_key(const std::string& val) {
+ key = val;
+ }
+
+ void __set_column_path(const ColumnPath& val) {
+ column_path = val;
+ }
+
+ void __set_consistency_level(const ConsistencyLevel::type val) {
+ consistency_level = val;
+ }
+
+ bool operator == (const Cassandra_get_args & rhs) const
+ {
+ if (!(key == rhs.key))
+ return false;
+ if (!(column_path == rhs.column_path))
+ return false;
+ if (!(consistency_level == rhs.consistency_level))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_get_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_get_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_get_pargs {
+ public:
+
+
+ virtual ~Cassandra_get_pargs() throw() {}
+
+ const std::string* key;
+ const ColumnPath* column_path;
+ const ConsistencyLevel::type* consistency_level;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_get_result__isset {
+ _Cassandra_get_result__isset() : success(false), ire(false), nfe(false), ue(false), te(false) {}
+ bool success;
+ bool ire;
+ bool nfe;
+ bool ue;
+ bool te;
+} _Cassandra_get_result__isset;
+
+class Cassandra_get_result {
+ public:
+
+ Cassandra_get_result() {
+ }
+
+ virtual ~Cassandra_get_result() throw() {}
+
+ ColumnOrSuperColumn success;
+ InvalidRequestException ire;
+ NotFoundException nfe;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_get_result__isset __isset;
+
+ void __set_success(const ColumnOrSuperColumn& val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_nfe(const NotFoundException& val) {
+ nfe = val;
+ }
+
+ void __set_ue(const UnavailableException& val) {
+ ue = val;
+ }
+
+ void __set_te(const TimedOutException& val) {
+ te = val;
+ }
+
+ bool operator == (const Cassandra_get_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(nfe == rhs.nfe))
+ return false;
+ if (!(ue == rhs.ue))
+ return false;
+ if (!(te == rhs.te))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_get_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_get_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_get_presult__isset {
+ _Cassandra_get_presult__isset() : success(false), ire(false), nfe(false), ue(false), te(false) {}
+ bool success;
+ bool ire;
+ bool nfe;
+ bool ue;
+ bool te;
+} _Cassandra_get_presult__isset;
+
+class Cassandra_get_presult {
+ public:
+
+
+ virtual ~Cassandra_get_presult() throw() {}
+
+ ColumnOrSuperColumn* success;
+ InvalidRequestException ire;
+ NotFoundException nfe;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_get_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_get_slice_args {
+ public:
+
+ Cassandra_get_slice_args() : key(""), consistency_level((ConsistencyLevel::type)1) {
+ consistency_level = (ConsistencyLevel::type)1;
+
+ }
+
+ virtual ~Cassandra_get_slice_args() throw() {}
+
+ std::string key;
+ ColumnParent column_parent;
+ SlicePredicate predicate;
+ ConsistencyLevel::type consistency_level;
+
+ void __set_key(const std::string& val) {
+ key = val;
+ }
+
+ void __set_column_parent(const ColumnParent& val) {
+ column_parent = val;
+ }
+
+ void __set_predicate(const SlicePredicate& val) {
+ predicate = val;
+ }
+
+ void __set_consistency_level(const ConsistencyLevel::type val) {
+ consistency_level = val;
+ }
+
+ bool operator == (const Cassandra_get_slice_args & rhs) const
+ {
+ if (!(key == rhs.key))
+ return false;
+ if (!(column_parent == rhs.column_parent))
+ return false;
+ if (!(predicate == rhs.predicate))
+ return false;
+ if (!(consistency_level == rhs.consistency_level))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_get_slice_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_get_slice_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_get_slice_pargs {
+ public:
+
+
+ virtual ~Cassandra_get_slice_pargs() throw() {}
+
+ const std::string* key;
+ const ColumnParent* column_parent;
+ const SlicePredicate* predicate;
+ const ConsistencyLevel::type* consistency_level;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_get_slice_result__isset {
+ _Cassandra_get_slice_result__isset() : success(false), ire(false), ue(false), te(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_get_slice_result__isset;
+
+class Cassandra_get_slice_result {
+ public:
+
+ Cassandra_get_slice_result() {
+ }
+
+ virtual ~Cassandra_get_slice_result() throw() {}
+
+ std::vector<ColumnOrSuperColumn> success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_get_slice_result__isset __isset;
+
+ void __set_success(const std::vector<ColumnOrSuperColumn> & val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_ue(const UnavailableException& val) {
+ ue = val;
+ }
+
+ void __set_te(const TimedOutException& val) {
+ te = val;
+ }
+
+ bool operator == (const Cassandra_get_slice_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(ue == rhs.ue))
+ return false;
+ if (!(te == rhs.te))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_get_slice_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_get_slice_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_get_slice_presult__isset {
+ _Cassandra_get_slice_presult__isset() : success(false), ire(false), ue(false), te(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_get_slice_presult__isset;
+
+class Cassandra_get_slice_presult {
+ public:
+
+
+ virtual ~Cassandra_get_slice_presult() throw() {}
+
+ std::vector<ColumnOrSuperColumn> * success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_get_slice_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_get_count_args {
+ public:
+
+ Cassandra_get_count_args() : key(""), consistency_level((ConsistencyLevel::type)1) {
+ consistency_level = (ConsistencyLevel::type)1;
+
+ }
+
+ virtual ~Cassandra_get_count_args() throw() {}
+
+ std::string key;
+ ColumnParent column_parent;
+ SlicePredicate predicate;
+ ConsistencyLevel::type consistency_level;
+
+ void __set_key(const std::string& val) {
+ key = val;
+ }
+
+ void __set_column_parent(const ColumnParent& val) {
+ column_parent = val;
+ }
+
+ void __set_predicate(const SlicePredicate& val) {
+ predicate = val;
+ }
+
+ void __set_consistency_level(const ConsistencyLevel::type val) {
+ consistency_level = val;
+ }
+
+ bool operator == (const Cassandra_get_count_args & rhs) const
+ {
+ if (!(key == rhs.key))
+ return false;
+ if (!(column_parent == rhs.column_parent))
+ return false;
+ if (!(predicate == rhs.predicate))
+ return false;
+ if (!(consistency_level == rhs.consistency_level))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_get_count_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_get_count_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_get_count_pargs {
+ public:
+
+
+ virtual ~Cassandra_get_count_pargs() throw() {}
+
+ const std::string* key;
+ const ColumnParent* column_parent;
+ const SlicePredicate* predicate;
+ const ConsistencyLevel::type* consistency_level;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_get_count_result__isset {
+ _Cassandra_get_count_result__isset() : success(false), ire(false), ue(false), te(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_get_count_result__isset;
+
+class Cassandra_get_count_result {
+ public:
+
+ Cassandra_get_count_result() : success(0) {
+ }
+
+ virtual ~Cassandra_get_count_result() throw() {}
+
+ int32_t success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_get_count_result__isset __isset;
+
+ void __set_success(const int32_t val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_ue(const UnavailableException& val) {
+ ue = val;
+ }
+
+ void __set_te(const TimedOutException& val) {
+ te = val;
+ }
+
+ bool operator == (const Cassandra_get_count_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(ue == rhs.ue))
+ return false;
+ if (!(te == rhs.te))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_get_count_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_get_count_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_get_count_presult__isset {
+ _Cassandra_get_count_presult__isset() : success(false), ire(false), ue(false), te(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_get_count_presult__isset;
+
+class Cassandra_get_count_presult {
+ public:
+
+
+ virtual ~Cassandra_get_count_presult() throw() {}
+
+ int32_t* success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_get_count_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_multiget_slice_args {
+ public:
+
+ Cassandra_multiget_slice_args() : consistency_level((ConsistencyLevel::type)1) {
+ consistency_level = (ConsistencyLevel::type)1;
+
+ }
+
+ virtual ~Cassandra_multiget_slice_args() throw() {}
+
+ std::vector<std::string> keys;
+ ColumnParent column_parent;
+ SlicePredicate predicate;
+ ConsistencyLevel::type consistency_level;
+
+ void __set_keys(const std::vector<std::string> & val) {
+ keys = val;
+ }
+
+ void __set_column_parent(const ColumnParent& val) {
+ column_parent = val;
+ }
+
+ void __set_predicate(const SlicePredicate& val) {
+ predicate = val;
+ }
+
+ void __set_consistency_level(const ConsistencyLevel::type val) {
+ consistency_level = val;
+ }
+
+ bool operator == (const Cassandra_multiget_slice_args & rhs) const
+ {
+ if (!(keys == rhs.keys))
+ return false;
+ if (!(column_parent == rhs.column_parent))
+ return false;
+ if (!(predicate == rhs.predicate))
+ return false;
+ if (!(consistency_level == rhs.consistency_level))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_multiget_slice_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_multiget_slice_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_multiget_slice_pargs {
+ public:
+
+
+ virtual ~Cassandra_multiget_slice_pargs() throw() {}
+
+ const std::vector<std::string> * keys;
+ const ColumnParent* column_parent;
+ const SlicePredicate* predicate;
+ const ConsistencyLevel::type* consistency_level;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_multiget_slice_result__isset {
+ _Cassandra_multiget_slice_result__isset() : success(false), ire(false), ue(false), te(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_multiget_slice_result__isset;
+
+class Cassandra_multiget_slice_result {
+ public:
+
+ Cassandra_multiget_slice_result() {
+ }
+
+ virtual ~Cassandra_multiget_slice_result() throw() {}
+
+ std::map<std::string, std::vector<ColumnOrSuperColumn> > success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_multiget_slice_result__isset __isset;
+
+ void __set_success(const std::map<std::string, std::vector<ColumnOrSuperColumn> > & val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_ue(const UnavailableException& val) {
+ ue = val;
+ }
+
+ void __set_te(const TimedOutException& val) {
+ te = val;
+ }
+
+ bool operator == (const Cassandra_multiget_slice_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(ue == rhs.ue))
+ return false;
+ if (!(te == rhs.te))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_multiget_slice_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_multiget_slice_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_multiget_slice_presult__isset {
+ _Cassandra_multiget_slice_presult__isset() : success(false), ire(false), ue(false), te(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_multiget_slice_presult__isset;
+
+class Cassandra_multiget_slice_presult {
+ public:
+
+
+ virtual ~Cassandra_multiget_slice_presult() throw() {}
+
+ std::map<std::string, std::vector<ColumnOrSuperColumn> > * success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_multiget_slice_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_multiget_count_args {
+ public:
+
+ Cassandra_multiget_count_args() : consistency_level((ConsistencyLevel::type)1) {
+ consistency_level = (ConsistencyLevel::type)1;
+
+ }
+
+ virtual ~Cassandra_multiget_count_args() throw() {}
+
+ std::vector<std::string> keys;
+ ColumnParent column_parent;
+ SlicePredicate predicate;
+ ConsistencyLevel::type consistency_level;
+
+ void __set_keys(const std::vector<std::string> & val) {
+ keys = val;
+ }
+
+ void __set_column_parent(const ColumnParent& val) {
+ column_parent = val;
+ }
+
+ void __set_predicate(const SlicePredicate& val) {
+ predicate = val;
+ }
+
+ void __set_consistency_level(const ConsistencyLevel::type val) {
+ consistency_level = val;
+ }
+
+ bool operator == (const Cassandra_multiget_count_args & rhs) const
+ {
+ if (!(keys == rhs.keys))
+ return false;
+ if (!(column_parent == rhs.column_parent))
+ return false;
+ if (!(predicate == rhs.predicate))
+ return false;
+ if (!(consistency_level == rhs.consistency_level))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_multiget_count_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_multiget_count_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_multiget_count_pargs {
+ public:
+
+
+ virtual ~Cassandra_multiget_count_pargs() throw() {}
+
+ const std::vector<std::string> * keys;
+ const ColumnParent* column_parent;
+ const SlicePredicate* predicate;
+ const ConsistencyLevel::type* consistency_level;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_multiget_count_result__isset {
+ _Cassandra_multiget_count_result__isset() : success(false), ire(false), ue(false), te(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_multiget_count_result__isset;
+
+class Cassandra_multiget_count_result {
+ public:
+
+ Cassandra_multiget_count_result() {
+ }
+
+ virtual ~Cassandra_multiget_count_result() throw() {}
+
+ std::map<std::string, int32_t> success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_multiget_count_result__isset __isset;
+
+ void __set_success(const std::map<std::string, int32_t> & val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_ue(const UnavailableException& val) {
+ ue = val;
+ }
+
+ void __set_te(const TimedOutException& val) {
+ te = val;
+ }
+
+ bool operator == (const Cassandra_multiget_count_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(ue == rhs.ue))
+ return false;
+ if (!(te == rhs.te))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_multiget_count_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_multiget_count_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_multiget_count_presult__isset {
+ _Cassandra_multiget_count_presult__isset() : success(false), ire(false), ue(false), te(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_multiget_count_presult__isset;
+
+class Cassandra_multiget_count_presult {
+ public:
+
+
+ virtual ~Cassandra_multiget_count_presult() throw() {}
+
+ std::map<std::string, int32_t> * success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_multiget_count_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_get_range_slices_args {
+ public:
+
+ Cassandra_get_range_slices_args() : consistency_level((ConsistencyLevel::type)1) {
+ consistency_level = (ConsistencyLevel::type)1;
+
+ }
+
+ virtual ~Cassandra_get_range_slices_args() throw() {}
+
+ ColumnParent column_parent;
+ SlicePredicate predicate;
+ KeyRange range;
+ ConsistencyLevel::type consistency_level;
+
+ void __set_column_parent(const ColumnParent& val) {
+ column_parent = val;
+ }
+
+ void __set_predicate(const SlicePredicate& val) {
+ predicate = val;
+ }
+
+ void __set_range(const KeyRange& val) {
+ range = val;
+ }
+
+ void __set_consistency_level(const ConsistencyLevel::type val) {
+ consistency_level = val;
+ }
+
+ bool operator == (const Cassandra_get_range_slices_args & rhs) const
+ {
+ if (!(column_parent == rhs.column_parent))
+ return false;
+ if (!(predicate == rhs.predicate))
+ return false;
+ if (!(range == rhs.range))
+ return false;
+ if (!(consistency_level == rhs.consistency_level))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_get_range_slices_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_get_range_slices_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_get_range_slices_pargs {
+ public:
+
+
+ virtual ~Cassandra_get_range_slices_pargs() throw() {}
+
+ const ColumnParent* column_parent;
+ const SlicePredicate* predicate;
+ const KeyRange* range;
+ const ConsistencyLevel::type* consistency_level;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_get_range_slices_result__isset {
+ _Cassandra_get_range_slices_result__isset() : success(false), ire(false), ue(false), te(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_get_range_slices_result__isset;
+
+class Cassandra_get_range_slices_result {
+ public:
+
+ Cassandra_get_range_slices_result() {
+ }
+
+ virtual ~Cassandra_get_range_slices_result() throw() {}
+
+ std::vector<KeySlice> success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_get_range_slices_result__isset __isset;
+
+ void __set_success(const std::vector<KeySlice> & val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_ue(const UnavailableException& val) {
+ ue = val;
+ }
+
+ void __set_te(const TimedOutException& val) {
+ te = val;
+ }
+
+ bool operator == (const Cassandra_get_range_slices_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(ue == rhs.ue))
+ return false;
+ if (!(te == rhs.te))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_get_range_slices_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_get_range_slices_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_get_range_slices_presult__isset {
+ _Cassandra_get_range_slices_presult__isset() : success(false), ire(false), ue(false), te(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_get_range_slices_presult__isset;
+
+class Cassandra_get_range_slices_presult {
+ public:
+
+
+ virtual ~Cassandra_get_range_slices_presult() throw() {}
+
+ std::vector<KeySlice> * success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_get_range_slices_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_get_paged_slice_args {
+ public:
+
+ Cassandra_get_paged_slice_args() : column_family(""), start_column(""), consistency_level((ConsistencyLevel::type)1) {
+ }
+
+ virtual ~Cassandra_get_paged_slice_args() throw() {}
+
+ std::string column_family;
+ KeyRange range;
+ std::string start_column;
+ ConsistencyLevel::type consistency_level;
+
+ void __set_column_family(const std::string& val) {
+ column_family = val;
+ }
+
+ void __set_range(const KeyRange& val) {
+ range = val;
+ }
+
+ void __set_start_column(const std::string& val) {
+ start_column = val;
+ }
+
+ void __set_consistency_level(const ConsistencyLevel::type val) {
+ consistency_level = val;
+ }
+
+ bool operator == (const Cassandra_get_paged_slice_args & rhs) const
+ {
+ if (!(column_family == rhs.column_family))
+ return false;
+ if (!(range == rhs.range))
+ return false;
+ if (!(start_column == rhs.start_column))
+ return false;
+ if (!(consistency_level == rhs.consistency_level))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_get_paged_slice_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_get_paged_slice_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_get_paged_slice_pargs {
+ public:
+
+
+ virtual ~Cassandra_get_paged_slice_pargs() throw() {}
+
+ const std::string* column_family;
+ const KeyRange* range;
+ const std::string* start_column;
+ const ConsistencyLevel::type* consistency_level;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_get_paged_slice_result__isset {
+ _Cassandra_get_paged_slice_result__isset() : success(false), ire(false), ue(false), te(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_get_paged_slice_result__isset;
+
+class Cassandra_get_paged_slice_result {
+ public:
+
+ Cassandra_get_paged_slice_result() {
+ }
+
+ virtual ~Cassandra_get_paged_slice_result() throw() {}
+
+ std::vector<KeySlice> success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_get_paged_slice_result__isset __isset;
+
+ void __set_success(const std::vector<KeySlice> & val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_ue(const UnavailableException& val) {
+ ue = val;
+ }
+
+ void __set_te(const TimedOutException& val) {
+ te = val;
+ }
+
+ bool operator == (const Cassandra_get_paged_slice_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(ue == rhs.ue))
+ return false;
+ if (!(te == rhs.te))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_get_paged_slice_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_get_paged_slice_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_get_paged_slice_presult__isset {
+ _Cassandra_get_paged_slice_presult__isset() : success(false), ire(false), ue(false), te(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_get_paged_slice_presult__isset;
+
+class Cassandra_get_paged_slice_presult {
+ public:
+
+
+ virtual ~Cassandra_get_paged_slice_presult() throw() {}
+
+ std::vector<KeySlice> * success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_get_paged_slice_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
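+// get_indexed_slices() scans rows matching the secondary-index expressions
+// in index_clause and returns the columns selected by column_predicate.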
+class Cassandra_get_indexed_slices_args {
+ public:
+
+ Cassandra_get_indexed_slices_args() : consistency_level((ConsistencyLevel::type)1) {
+ }
+
+ virtual ~Cassandra_get_indexed_slices_args() throw() {}
+
+ ColumnParent column_parent;
+ IndexClause index_clause;
+ SlicePredicate column_predicate;
+ ConsistencyLevel::type consistency_level;
+
+ void __set_column_parent(const ColumnParent& val) {
+ column_parent = val;
+ }
+
+ void __set_index_clause(const IndexClause& val) {
+ index_clause = val;
+ }
+
+ void __set_column_predicate(const SlicePredicate& val) {
+ column_predicate = val;
+ }
+
+ void __set_consistency_level(const ConsistencyLevel::type val) {
+ consistency_level = val;
+ }
+
+ bool operator == (const Cassandra_get_indexed_slices_args & rhs) const
+ {
+ if (!(column_parent == rhs.column_parent))
+ return false;
+ if (!(index_clause == rhs.index_clause))
+ return false;
+ if (!(column_predicate == rhs.column_predicate))
+ return false;
+ if (!(consistency_level == rhs.consistency_level))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_get_indexed_slices_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_get_indexed_slices_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_get_indexed_slices_pargs {
+ public:
+
+
+ virtual ~Cassandra_get_indexed_slices_pargs() throw() {}
+
+ const ColumnParent* column_parent;
+ const IndexClause* index_clause;
+ const SlicePredicate* column_predicate;
+ const ConsistencyLevel::type* consistency_level;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_get_indexed_slices_result__isset {
+ _Cassandra_get_indexed_slices_result__isset() : success(false), ire(false), ue(false), te(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_get_indexed_slices_result__isset;
+
+class Cassandra_get_indexed_slices_result {
+ public:
+
+ Cassandra_get_indexed_slices_result() {
+ }
+
+ virtual ~Cassandra_get_indexed_slices_result() throw() {}
+
+ std::vector<KeySlice> success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_get_indexed_slices_result__isset __isset;
+
+ void __set_success(const std::vector<KeySlice> & val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_ue(const UnavailableException& val) {
+ ue = val;
+ }
+
+ void __set_te(const TimedOutException& val) {
+ te = val;
+ }
+
+ bool operator == (const Cassandra_get_indexed_slices_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(ue == rhs.ue))
+ return false;
+ if (!(te == rhs.te))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_get_indexed_slices_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_get_indexed_slices_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_get_indexed_slices_presult__isset {
+ _Cassandra_get_indexed_slices_presult__isset() : success(false), ire(false), ue(false), te(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_get_indexed_slices_presult__isset;
+
+class Cassandra_get_indexed_slices_presult {
+ public:
+
+
+ virtual ~Cassandra_get_indexed_slices_presult() throw() {}
+
+ std::vector<KeySlice> * success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_get_indexed_slices_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
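+// insert() writes a single Column under `key`. The default of
+// (ConsistencyLevel::type)1 used throughout corresponds to
+// ConsistencyLevel::ONE in the Cassandra IDL.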
+class Cassandra_insert_args {
+ public:
+
+ Cassandra_insert_args() : key(""), consistency_level((ConsistencyLevel::type)1) {
+ }
+
+ virtual ~Cassandra_insert_args() throw() {}
+
+ std::string key;
+ ColumnParent column_parent;
+ Column column;
+ ConsistencyLevel::type consistency_level;
+
+ void __set_key(const std::string& val) {
+ key = val;
+ }
+
+ void __set_column_parent(const ColumnParent& val) {
+ column_parent = val;
+ }
+
+ void __set_column(const Column& val) {
+ column = val;
+ }
+
+ void __set_consistency_level(const ConsistencyLevel::type val) {
+ consistency_level = val;
+ }
+
+ bool operator == (const Cassandra_insert_args & rhs) const
+ {
+ if (!(key == rhs.key))
+ return false;
+ if (!(column_parent == rhs.column_parent))
+ return false;
+ if (!(column == rhs.column))
+ return false;
+ if (!(consistency_level == rhs.consistency_level))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_insert_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_insert_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_insert_pargs {
+ public:
+
+
+ virtual ~Cassandra_insert_pargs() throw() {}
+
+ const std::string* key;
+ const ColumnParent* column_parent;
+ const Column* column;
+ const ConsistencyLevel::type* consistency_level;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_insert_result__isset {
+ _Cassandra_insert_result__isset() : ire(false), ue(false), te(false) {}
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_insert_result__isset;
+
+class Cassandra_insert_result {
+ public:
+
+ Cassandra_insert_result() {
+ }
+
+ virtual ~Cassandra_insert_result() throw() {}
+
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_insert_result__isset __isset;
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_ue(const UnavailableException& val) {
+ ue = val;
+ }
+
+ void __set_te(const TimedOutException& val) {
+ te = val;
+ }
+
+ bool operator == (const Cassandra_insert_result & rhs) const
+ {
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(ue == rhs.ue))
+ return false;
+ if (!(te == rhs.te))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_insert_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_insert_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_insert_presult__isset {
+ _Cassandra_insert_presult__isset() : ire(false), ue(false), te(false) {}
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_insert_presult__isset;
+
+class Cassandra_insert_presult {
+ public:
+
+
+ virtual ~Cassandra_insert_presult() throw() {}
+
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_insert_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_add_args {
+ public:
+
+ Cassandra_add_args() : key(""), consistency_level((ConsistencyLevel::type)1) {
+ }
+
+ virtual ~Cassandra_add_args() throw() {}
+
+ std::string key;
+ ColumnParent column_parent;
+ CounterColumn column;
+ ConsistencyLevel::type consistency_level;
+
+ void __set_key(const std::string& val) {
+ key = val;
+ }
+
+ void __set_column_parent(const ColumnParent& val) {
+ column_parent = val;
+ }
+
+ void __set_column(const CounterColumn& val) {
+ column = val;
+ }
+
+ void __set_consistency_level(const ConsistencyLevel::type val) {
+ consistency_level = val;
+ }
+
+ bool operator == (const Cassandra_add_args & rhs) const
+ {
+ if (!(key == rhs.key))
+ return false;
+ if (!(column_parent == rhs.column_parent))
+ return false;
+ if (!(column == rhs.column))
+ return false;
+ if (!(consistency_level == rhs.consistency_level))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_add_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_add_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_add_pargs {
+ public:
+
+
+ virtual ~Cassandra_add_pargs() throw() {}
+
+ const std::string* key;
+ const ColumnParent* column_parent;
+ const CounterColumn* column;
+ const ConsistencyLevel::type* consistency_level;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_add_result__isset {
+ _Cassandra_add_result__isset() : ire(false), ue(false), te(false) {}
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_add_result__isset;
+
+class Cassandra_add_result {
+ public:
+
+ Cassandra_add_result() {
+ }
+
+ virtual ~Cassandra_add_result() throw() {}
+
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_add_result__isset __isset;
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_ue(const UnavailableException& val) {
+ ue = val;
+ }
+
+ void __set_te(const TimedOutException& val) {
+ te = val;
+ }
+
+ bool operator == (const Cassandra_add_result & rhs) const
+ {
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(ue == rhs.ue))
+ return false;
+ if (!(te == rhs.te))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_add_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_add_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_add_presult__isset {
+ _Cassandra_add_presult__isset() : ire(false), ue(false), te(false) {}
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_add_presult__isset;
+
+class Cassandra_add_presult {
+ public:
+
+
+ virtual ~Cassandra_add_presult() throw() {}
+
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_add_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
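+// Unlike the *_args classes above, remove()'s args carry an __isset
+// tracker: consistency_level is evidently not marked `required` in the
+// IDL, so the generator records whether it was explicitly set.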
+typedef struct _Cassandra_remove_args__isset {
+ _Cassandra_remove_args__isset() : consistency_level(false) {}
+ bool consistency_level;
+} _Cassandra_remove_args__isset;
+
+class Cassandra_remove_args {
+ public:
+
+ Cassandra_remove_args() : key(""), timestamp(0), consistency_level((ConsistencyLevel::type)1) {
+ }
+
+ virtual ~Cassandra_remove_args() throw() {}
+
+ std::string key;
+ ColumnPath column_path;
+ int64_t timestamp;
+ ConsistencyLevel::type consistency_level;
+
+ _Cassandra_remove_args__isset __isset;
+
+ void __set_key(const std::string& val) {
+ key = val;
+ }
+
+ void __set_column_path(const ColumnPath& val) {
+ column_path = val;
+ }
+
+ void __set_timestamp(const int64_t val) {
+ timestamp = val;
+ }
+
+ void __set_consistency_level(const ConsistencyLevel::type val) {
+ consistency_level = val;
+ }
+
+ bool operator == (const Cassandra_remove_args & rhs) const
+ {
+ if (!(key == rhs.key))
+ return false;
+ if (!(column_path == rhs.column_path))
+ return false;
+ if (!(timestamp == rhs.timestamp))
+ return false;
+ if (!(consistency_level == rhs.consistency_level))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_remove_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_remove_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_remove_pargs {
+ public:
+
+
+ virtual ~Cassandra_remove_pargs() throw() {}
+
+ const std::string* key;
+ const ColumnPath* column_path;
+ const int64_t* timestamp;
+ const ConsistencyLevel::type* consistency_level;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_remove_result__isset {
+ _Cassandra_remove_result__isset() : ire(false), ue(false), te(false) {}
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_remove_result__isset;
+
+class Cassandra_remove_result {
+ public:
+
+ Cassandra_remove_result() {
+ }
+
+ virtual ~Cassandra_remove_result() throw() {}
+
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_remove_result__isset __isset;
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_ue(const UnavailableException& val) {
+ ue = val;
+ }
+
+ void __set_te(const TimedOutException& val) {
+ te = val;
+ }
+
+ bool operator == (const Cassandra_remove_result & rhs) const
+ {
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(ue == rhs.ue))
+ return false;
+ if (!(te == rhs.te))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_remove_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_remove_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_remove_presult__isset {
+ _Cassandra_remove_presult__isset() : ire(false), ue(false), te(false) {}
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_remove_presult__isset;
+
+class Cassandra_remove_presult {
+ public:
+
+
+ virtual ~Cassandra_remove_presult() throw() {}
+
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_remove_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_remove_counter_args {
+ public:
+
+ Cassandra_remove_counter_args() : key(""), consistency_level((ConsistencyLevel::type)1) {
+ }
+
+ virtual ~Cassandra_remove_counter_args() throw() {}
+
+ std::string key;
+ ColumnPath path;
+ ConsistencyLevel::type consistency_level;
+
+ void __set_key(const std::string& val) {
+ key = val;
+ }
+
+ void __set_path(const ColumnPath& val) {
+ path = val;
+ }
+
+ void __set_consistency_level(const ConsistencyLevel::type val) {
+ consistency_level = val;
+ }
+
+ bool operator == (const Cassandra_remove_counter_args & rhs) const
+ {
+ if (!(key == rhs.key))
+ return false;
+ if (!(path == rhs.path))
+ return false;
+ if (!(consistency_level == rhs.consistency_level))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_remove_counter_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_remove_counter_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_remove_counter_pargs {
+ public:
+
+
+ virtual ~Cassandra_remove_counter_pargs() throw() {}
+
+ const std::string* key;
+ const ColumnPath* path;
+ const ConsistencyLevel::type* consistency_level;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_remove_counter_result__isset {
+ _Cassandra_remove_counter_result__isset() : ire(false), ue(false), te(false) {}
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_remove_counter_result__isset;
+
+class Cassandra_remove_counter_result {
+ public:
+
+ Cassandra_remove_counter_result() {
+ }
+
+ virtual ~Cassandra_remove_counter_result() throw() {}
+
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_remove_counter_result__isset __isset;
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_ue(const UnavailableException& val) {
+ ue = val;
+ }
+
+ void __set_te(const TimedOutException& val) {
+ te = val;
+ }
+
+ bool operator == (const Cassandra_remove_counter_result & rhs) const
+ {
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(ue == rhs.ue))
+ return false;
+ if (!(te == rhs.te))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_remove_counter_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_remove_counter_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_remove_counter_presult__isset {
+ _Cassandra_remove_counter_presult__isset() : ire(false), ue(false), te(false) {}
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_remove_counter_presult__isset;
+
+class Cassandra_remove_counter_presult {
+ public:
+
+
+ virtual ~Cassandra_remove_counter_presult() throw() {}
+
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_remove_counter_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
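+// batch_mutate() takes a doubly nested map: row key -> column family name
+// -> list of Mutations, all applied at a single consistency level.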
+class Cassandra_batch_mutate_args {
+ public:
+
+ Cassandra_batch_mutate_args() : consistency_level((ConsistencyLevel::type)1) {
+ }
+
+ virtual ~Cassandra_batch_mutate_args() throw() {}
+
+ std::map<std::string, std::map<std::string, std::vector<Mutation> > > mutation_map;
+ ConsistencyLevel::type consistency_level;
+
+ void __set_mutation_map(const std::map<std::string, std::map<std::string, std::vector<Mutation> > > & val) {
+ mutation_map = val;
+ }
+
+ void __set_consistency_level(const ConsistencyLevel::type val) {
+ consistency_level = val;
+ }
+
+ bool operator == (const Cassandra_batch_mutate_args & rhs) const
+ {
+ if (!(mutation_map == rhs.mutation_map))
+ return false;
+ if (!(consistency_level == rhs.consistency_level))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_batch_mutate_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_batch_mutate_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_batch_mutate_pargs {
+ public:
+
+
+ virtual ~Cassandra_batch_mutate_pargs() throw() {}
+
+ const std::map<std::string, std::map<std::string, std::vector<Mutation> > > * mutation_map;
+ const ConsistencyLevel::type* consistency_level;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_batch_mutate_result__isset {
+ _Cassandra_batch_mutate_result__isset() : ire(false), ue(false), te(false) {}
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_batch_mutate_result__isset;
+
+class Cassandra_batch_mutate_result {
+ public:
+
+ Cassandra_batch_mutate_result() {
+ }
+
+ virtual ~Cassandra_batch_mutate_result() throw() {}
+
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_batch_mutate_result__isset __isset;
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_ue(const UnavailableException& val) {
+ ue = val;
+ }
+
+ void __set_te(const TimedOutException& val) {
+ te = val;
+ }
+
+ bool operator == (const Cassandra_batch_mutate_result & rhs) const
+ {
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(ue == rhs.ue))
+ return false;
+ if (!(te == rhs.te))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_batch_mutate_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_batch_mutate_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_batch_mutate_presult__isset {
+ _Cassandra_batch_mutate_presult__isset() : ire(false), ue(false), te(false) {}
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_batch_mutate_presult__isset;
+
+class Cassandra_batch_mutate_presult {
+ public:
+
+
+ virtual ~Cassandra_batch_mutate_presult() throw() {}
+
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_batch_mutate_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_truncate_args {
+ public:
+
+ Cassandra_truncate_args() : cfname("") {
+ }
+
+ virtual ~Cassandra_truncate_args() throw() {}
+
+ std::string cfname;
+
+ void __set_cfname(const std::string& val) {
+ cfname = val;
+ }
+
+ bool operator == (const Cassandra_truncate_args & rhs) const
+ {
+ if (!(cfname == rhs.cfname))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_truncate_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_truncate_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_truncate_pargs {
+ public:
+
+
+ virtual ~Cassandra_truncate_pargs() throw() {}
+
+ const std::string* cfname;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_truncate_result__isset {
+ _Cassandra_truncate_result__isset() : ire(false), ue(false), te(false) {}
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_truncate_result__isset;
+
+class Cassandra_truncate_result {
+ public:
+
+ Cassandra_truncate_result() {
+ }
+
+ virtual ~Cassandra_truncate_result() throw() {}
+
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_truncate_result__isset __isset;
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_ue(const UnavailableException& val) {
+ ue = val;
+ }
+
+ void __set_te(const TimedOutException& val) {
+ te = val;
+ }
+
+ bool operator == (const Cassandra_truncate_result & rhs) const
+ {
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(ue == rhs.ue))
+ return false;
+ if (!(te == rhs.te))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_truncate_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_truncate_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_truncate_presult__isset {
+ _Cassandra_truncate_presult__isset() : ire(false), ue(false), te(false) {}
+ bool ire;
+ bool ue;
+ bool te;
+} _Cassandra_truncate_presult__isset;
+
+class Cassandra_truncate_presult {
+ public:
+
+
+ virtual ~Cassandra_truncate_presult() throw() {}
+
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+
+ _Cassandra_truncate_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
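+// The describe_* calls take no parameters, so their _args classes are
+// empty and operator== is trivially true.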
+class Cassandra_describe_schema_versions_args {
+ public:
+
+ Cassandra_describe_schema_versions_args() {
+ }
+
+ virtual ~Cassandra_describe_schema_versions_args() throw() {}
+
+
+ bool operator == (const Cassandra_describe_schema_versions_args & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const Cassandra_describe_schema_versions_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_schema_versions_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_describe_schema_versions_pargs {
+ public:
+
+
+ virtual ~Cassandra_describe_schema_versions_pargs() throw() {}
+
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_schema_versions_result__isset {
+ _Cassandra_describe_schema_versions_result__isset() : success(false), ire(false) {}
+ bool success;
+ bool ire;
+} _Cassandra_describe_schema_versions_result__isset;
+
+class Cassandra_describe_schema_versions_result {
+ public:
+
+ Cassandra_describe_schema_versions_result() {
+ }
+
+ virtual ~Cassandra_describe_schema_versions_result() throw() {}
+
+ std::map<std::string, std::vector<std::string> > success;
+ InvalidRequestException ire;
+
+ _Cassandra_describe_schema_versions_result__isset __isset;
+
+ void __set_success(const std::map<std::string, std::vector<std::string> > & val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ bool operator == (const Cassandra_describe_schema_versions_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_describe_schema_versions_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_schema_versions_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_schema_versions_presult__isset {
+ _Cassandra_describe_schema_versions_presult__isset() : success(false), ire(false) {}
+ bool success;
+ bool ire;
+} _Cassandra_describe_schema_versions_presult__isset;
+
+class Cassandra_describe_schema_versions_presult {
+ public:
+
+
+ virtual ~Cassandra_describe_schema_versions_presult() throw() {}
+
+ std::map<std::string, std::vector<std::string> > * success;
+ InvalidRequestException ire;
+
+ _Cassandra_describe_schema_versions_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_describe_keyspaces_args {
+ public:
+
+ Cassandra_describe_keyspaces_args() {
+ }
+
+ virtual ~Cassandra_describe_keyspaces_args() throw() {}
+
+
+ bool operator == (const Cassandra_describe_keyspaces_args & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const Cassandra_describe_keyspaces_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_keyspaces_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_describe_keyspaces_pargs {
+ public:
+
+
+ virtual ~Cassandra_describe_keyspaces_pargs() throw() {}
+
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_keyspaces_result__isset {
+ _Cassandra_describe_keyspaces_result__isset() : success(false), ire(false) {}
+ bool success;
+ bool ire;
+} _Cassandra_describe_keyspaces_result__isset;
+
+class Cassandra_describe_keyspaces_result {
+ public:
+
+ Cassandra_describe_keyspaces_result() {
+ }
+
+ virtual ~Cassandra_describe_keyspaces_result() throw() {}
+
+ std::vector<KsDef> success;
+ InvalidRequestException ire;
+
+ _Cassandra_describe_keyspaces_result__isset __isset;
+
+ void __set_success(const std::vector<KsDef> & val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ bool operator == (const Cassandra_describe_keyspaces_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_describe_keyspaces_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_keyspaces_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_keyspaces_presult__isset {
+ _Cassandra_describe_keyspaces_presult__isset() : success(false), ire(false) {}
+ bool success;
+ bool ire;
+} _Cassandra_describe_keyspaces_presult__isset;
+
+class Cassandra_describe_keyspaces_presult {
+ public:
+
+
+ virtual ~Cassandra_describe_keyspaces_presult() throw() {}
+
+ std::vector<KsDef> * success;
+ InvalidRequestException ire;
+
+ _Cassandra_describe_keyspaces_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_describe_cluster_name_args {
+ public:
+
+ Cassandra_describe_cluster_name_args() {
+ }
+
+ virtual ~Cassandra_describe_cluster_name_args() throw() {}
+
+
+ bool operator == (const Cassandra_describe_cluster_name_args & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const Cassandra_describe_cluster_name_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_cluster_name_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_describe_cluster_name_pargs {
+ public:
+
+
+ virtual ~Cassandra_describe_cluster_name_pargs() throw() {}
+
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_cluster_name_result__isset {
+ _Cassandra_describe_cluster_name_result__isset() : success(false) {}
+ bool success;
+} _Cassandra_describe_cluster_name_result__isset;
+
+class Cassandra_describe_cluster_name_result {
+ public:
+
+ Cassandra_describe_cluster_name_result() : success("") {
+ }
+
+ virtual ~Cassandra_describe_cluster_name_result() throw() {}
+
+ std::string success;
+
+ _Cassandra_describe_cluster_name_result__isset __isset;
+
+ void __set_success(const std::string& val) {
+ success = val;
+ }
+
+ bool operator == (const Cassandra_describe_cluster_name_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_describe_cluster_name_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_cluster_name_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_cluster_name_presult__isset {
+ _Cassandra_describe_cluster_name_presult__isset() : success(false) {}
+ bool success;
+} _Cassandra_describe_cluster_name_presult__isset;
+
+class Cassandra_describe_cluster_name_presult {
+ public:
+
+
+ virtual ~Cassandra_describe_cluster_name_presult() throw() {}
+
+ std::string* success;
+
+ _Cassandra_describe_cluster_name_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_describe_version_args {
+ public:
+
+ Cassandra_describe_version_args() {
+ }
+
+ virtual ~Cassandra_describe_version_args() throw() {}
+
+
+ bool operator == (const Cassandra_describe_version_args & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const Cassandra_describe_version_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_version_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_describe_version_pargs {
+ public:
+
+
+ virtual ~Cassandra_describe_version_pargs() throw() {}
+
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_version_result__isset {
+ _Cassandra_describe_version_result__isset() : success(false) {}
+ bool success;
+} _Cassandra_describe_version_result__isset;
+
+class Cassandra_describe_version_result {
+ public:
+
+ Cassandra_describe_version_result() : success("") {
+ }
+
+ virtual ~Cassandra_describe_version_result() throw() {}
+
+ std::string success;
+
+ _Cassandra_describe_version_result__isset __isset;
+
+ void __set_success(const std::string& val) {
+ success = val;
+ }
+
+ bool operator == (const Cassandra_describe_version_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_describe_version_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_version_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_version_presult__isset {
+ _Cassandra_describe_version_presult__isset() : success(false) {}
+ bool success;
+} _Cassandra_describe_version_presult__isset;
+
+class Cassandra_describe_version_presult {
+ public:
+
+
+ virtual ~Cassandra_describe_version_presult() throw() {}
+
+ std::string* success;
+
+ _Cassandra_describe_version_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_describe_ring_args {
+ public:
+
+ Cassandra_describe_ring_args() : keyspace("") {
+ }
+
+ virtual ~Cassandra_describe_ring_args() throw() {}
+
+ std::string keyspace;
+
+ void __set_keyspace(const std::string& val) {
+ keyspace = val;
+ }
+
+ bool operator == (const Cassandra_describe_ring_args & rhs) const
+ {
+ if (!(keyspace == rhs.keyspace))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_describe_ring_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_ring_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_describe_ring_pargs {
+ public:
+
+
+ virtual ~Cassandra_describe_ring_pargs() throw() {}
+
+ const std::string* keyspace;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_ring_result__isset {
+ _Cassandra_describe_ring_result__isset() : success(false), ire(false) {}
+ bool success;
+ bool ire;
+} _Cassandra_describe_ring_result__isset;
+
+class Cassandra_describe_ring_result {
+ public:
+
+ Cassandra_describe_ring_result() {
+ }
+
+ virtual ~Cassandra_describe_ring_result() throw() {}
+
+ std::vector<TokenRange> success;
+ InvalidRequestException ire;
+
+ _Cassandra_describe_ring_result__isset __isset;
+
+ void __set_success(const std::vector<TokenRange> & val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ bool operator == (const Cassandra_describe_ring_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_describe_ring_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_ring_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_ring_presult__isset {
+ _Cassandra_describe_ring_presult__isset() : success(false), ire(false) {}
+ bool success;
+ bool ire;
+} _Cassandra_describe_ring_presult__isset;
+
+class Cassandra_describe_ring_presult {
+ public:
+
+
+ virtual ~Cassandra_describe_ring_presult() throw() {}
+
+ std::vector<TokenRange> * success;
+ InvalidRequestException ire;
+
+ _Cassandra_describe_ring_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_describe_token_map_args {
+ public:
+
+ Cassandra_describe_token_map_args() {
+ }
+
+ virtual ~Cassandra_describe_token_map_args() throw() {}
+
+
+ bool operator == (const Cassandra_describe_token_map_args & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const Cassandra_describe_token_map_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_token_map_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_describe_token_map_pargs {
+ public:
+
+
+ virtual ~Cassandra_describe_token_map_pargs() throw() {}
+
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_token_map_result__isset {
+ _Cassandra_describe_token_map_result__isset() : success(false), ire(false) {}
+ bool success;
+ bool ire;
+} _Cassandra_describe_token_map_result__isset;
+
+class Cassandra_describe_token_map_result {
+ public:
+
+ Cassandra_describe_token_map_result() {
+ }
+
+ virtual ~Cassandra_describe_token_map_result() throw() {}
+
+ std::map<std::string, std::string> success;
+ InvalidRequestException ire;
+
+ _Cassandra_describe_token_map_result__isset __isset;
+
+ void __set_success(const std::map<std::string, std::string> & val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ bool operator == (const Cassandra_describe_token_map_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_describe_token_map_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_token_map_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_token_map_presult__isset {
+ _Cassandra_describe_token_map_presult__isset() : success(false), ire(false) {}
+ bool success;
+ bool ire;
+} _Cassandra_describe_token_map_presult__isset;
+
+class Cassandra_describe_token_map_presult {
+ public:
+
+
+ virtual ~Cassandra_describe_token_map_presult() throw() {}
+
+ std::map<std::string, std::string> * success;
+ InvalidRequestException ire;
+
+ _Cassandra_describe_token_map_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_describe_partitioner_args {
+ public:
+
+ Cassandra_describe_partitioner_args() {
+ }
+
+ virtual ~Cassandra_describe_partitioner_args() throw() {}
+
+
+ bool operator == (const Cassandra_describe_partitioner_args & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const Cassandra_describe_partitioner_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_partitioner_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_describe_partitioner_pargs {
+ public:
+
+
+ virtual ~Cassandra_describe_partitioner_pargs() throw() {}
+
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_partitioner_result__isset {
+ _Cassandra_describe_partitioner_result__isset() : success(false) {}
+ bool success;
+} _Cassandra_describe_partitioner_result__isset;
+
+class Cassandra_describe_partitioner_result {
+ public:
+
+ Cassandra_describe_partitioner_result() : success("") {
+ }
+
+ virtual ~Cassandra_describe_partitioner_result() throw() {}
+
+ std::string success;
+
+ _Cassandra_describe_partitioner_result__isset __isset;
+
+ void __set_success(const std::string& val) {
+ success = val;
+ }
+
+ bool operator == (const Cassandra_describe_partitioner_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_describe_partitioner_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_partitioner_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_partitioner_presult__isset {
+ _Cassandra_describe_partitioner_presult__isset() : success(false) {}
+ bool success;
+} _Cassandra_describe_partitioner_presult__isset;
+
+class Cassandra_describe_partitioner_presult {
+ public:
+
+
+ virtual ~Cassandra_describe_partitioner_presult() throw() {}
+
+ std::string* success;
+
+ _Cassandra_describe_partitioner_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_describe_snitch_args {
+ public:
+
+ Cassandra_describe_snitch_args() {
+ }
+
+ virtual ~Cassandra_describe_snitch_args() throw() {}
+
+
+ bool operator == (const Cassandra_describe_snitch_args & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const Cassandra_describe_snitch_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_snitch_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_describe_snitch_pargs {
+ public:
+
+
+ virtual ~Cassandra_describe_snitch_pargs() throw() {}
+
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_snitch_result__isset {
+ _Cassandra_describe_snitch_result__isset() : success(false) {}
+ bool success;
+} _Cassandra_describe_snitch_result__isset;
+
+class Cassandra_describe_snitch_result {
+ public:
+
+ Cassandra_describe_snitch_result() : success("") {
+ }
+
+ virtual ~Cassandra_describe_snitch_result() throw() {}
+
+ std::string success;
+
+ _Cassandra_describe_snitch_result__isset __isset;
+
+ void __set_success(const std::string& val) {
+ success = val;
+ }
+
+ bool operator == (const Cassandra_describe_snitch_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_describe_snitch_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_snitch_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_snitch_presult__isset {
+ _Cassandra_describe_snitch_presult__isset() : success(false) {}
+ bool success;
+} _Cassandra_describe_snitch_presult__isset;
+
+class Cassandra_describe_snitch_presult {
+ public:
+
+
+ virtual ~Cassandra_describe_snitch_presult() throw() {}
+
+ std::string* success;
+
+ _Cassandra_describe_snitch_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_describe_keyspace_args {
+ public:
+
+ Cassandra_describe_keyspace_args() : keyspace("") {
+ }
+
+ virtual ~Cassandra_describe_keyspace_args() throw() {}
+
+ std::string keyspace;
+
+ void __set_keyspace(const std::string& val) {
+ keyspace = val;
+ }
+
+ bool operator == (const Cassandra_describe_keyspace_args & rhs) const
+ {
+ if (!(keyspace == rhs.keyspace))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_describe_keyspace_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_keyspace_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_describe_keyspace_pargs {
+ public:
+
+
+ virtual ~Cassandra_describe_keyspace_pargs() throw() {}
+
+ const std::string* keyspace;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_keyspace_result__isset {
+ _Cassandra_describe_keyspace_result__isset() : success(false), nfe(false), ire(false) {}
+ bool success;
+ bool nfe;
+ bool ire;
+} _Cassandra_describe_keyspace_result__isset;
+
+class Cassandra_describe_keyspace_result {
+ public:
+
+ Cassandra_describe_keyspace_result() {
+ }
+
+ virtual ~Cassandra_describe_keyspace_result() throw() {}
+
+ KsDef success;
+ NotFoundException nfe;
+ InvalidRequestException ire;
+
+ _Cassandra_describe_keyspace_result__isset __isset;
+
+ void __set_success(const KsDef& val) {
+ success = val;
+ }
+
+ void __set_nfe(const NotFoundException& val) {
+ nfe = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ bool operator == (const Cassandra_describe_keyspace_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(nfe == rhs.nfe))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_describe_keyspace_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_keyspace_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_keyspace_presult__isset {
+ _Cassandra_describe_keyspace_presult__isset() : success(false), nfe(false), ire(false) {}
+ bool success;
+ bool nfe;
+ bool ire;
+} _Cassandra_describe_keyspace_presult__isset;
+
+class Cassandra_describe_keyspace_presult {
+ public:
+
+
+ virtual ~Cassandra_describe_keyspace_presult() throw() {}
+
+ KsDef* success;
+ NotFoundException nfe;
+ InvalidRequestException ire;
+
+ _Cassandra_describe_keyspace_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_describe_splits_args {
+ public:
+
+ Cassandra_describe_splits_args() : cfName(""), start_token(""), end_token(""), keys_per_split(0) {
+ }
+
+ virtual ~Cassandra_describe_splits_args() throw() {}
+
+ std::string cfName;
+ std::string start_token;
+ std::string end_token;
+ int32_t keys_per_split;
+
+ void __set_cfName(const std::string& val) {
+ cfName = val;
+ }
+
+ void __set_start_token(const std::string& val) {
+ start_token = val;
+ }
+
+ void __set_end_token(const std::string& val) {
+ end_token = val;
+ }
+
+ void __set_keys_per_split(const int32_t val) {
+ keys_per_split = val;
+ }
+
+ bool operator == (const Cassandra_describe_splits_args & rhs) const
+ {
+ if (!(cfName == rhs.cfName))
+ return false;
+ if (!(start_token == rhs.start_token))
+ return false;
+ if (!(end_token == rhs.end_token))
+ return false;
+ if (!(keys_per_split == rhs.keys_per_split))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_describe_splits_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_splits_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_describe_splits_pargs {
+ public:
+
+
+ virtual ~Cassandra_describe_splits_pargs() throw() {}
+
+ const std::string* cfName;
+ const std::string* start_token;
+ const std::string* end_token;
+ const int32_t* keys_per_split;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_splits_result__isset {
+ _Cassandra_describe_splits_result__isset() : success(false), ire(false) {}
+ bool success;
+ bool ire;
+} _Cassandra_describe_splits_result__isset;
+
+class Cassandra_describe_splits_result {
+ public:
+
+ Cassandra_describe_splits_result() {
+ }
+
+ virtual ~Cassandra_describe_splits_result() throw() {}
+
+ std::vector<std::string> success;
+ InvalidRequestException ire;
+
+ _Cassandra_describe_splits_result__isset __isset;
+
+ void __set_success(const std::vector<std::string> & val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ bool operator == (const Cassandra_describe_splits_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_describe_splits_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_describe_splits_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_describe_splits_presult__isset {
+ _Cassandra_describe_splits_presult__isset() : success(false), ire(false) {}
+ bool success;
+ bool ire;
+} _Cassandra_describe_splits_presult__isset;
+
+class Cassandra_describe_splits_presult {
+ public:
+
+
+ virtual ~Cassandra_describe_splits_presult() throw() {}
+
+ std::vector<std::string> * success;
+ InvalidRequestException ire;
+
+ _Cassandra_describe_splits_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
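+// The schema-changing system_* calls return the new schema version ID as a
+// string and may additionally raise SchemaDisagreementException (sde)
+// while node schemas converge.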
+class Cassandra_system_add_column_family_args {
+ public:
+
+ Cassandra_system_add_column_family_args() {
+ }
+
+ virtual ~Cassandra_system_add_column_family_args() throw() {}
+
+ CfDef cf_def;
+
+ void __set_cf_def(const CfDef& val) {
+ cf_def = val;
+ }
+
+ bool operator == (const Cassandra_system_add_column_family_args & rhs) const
+ {
+ if (!(cf_def == rhs.cf_def))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_system_add_column_family_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_system_add_column_family_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_system_add_column_family_pargs {
+ public:
+
+
+ virtual ~Cassandra_system_add_column_family_pargs() throw() {}
+
+ const CfDef* cf_def;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_system_add_column_family_result__isset {
+ _Cassandra_system_add_column_family_result__isset() : success(false), ire(false), sde(false) {}
+ bool success;
+ bool ire;
+ bool sde;
+} _Cassandra_system_add_column_family_result__isset;
+
+class Cassandra_system_add_column_family_result {
+ public:
+
+ Cassandra_system_add_column_family_result() : success("") {
+ }
+
+ virtual ~Cassandra_system_add_column_family_result() throw() {}
+
+ std::string success;
+ InvalidRequestException ire;
+ SchemaDisagreementException sde;
+
+ _Cassandra_system_add_column_family_result__isset __isset;
+
+ void __set_success(const std::string& val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_sde(const SchemaDisagreementException& val) {
+ sde = val;
+ }
+
+ bool operator == (const Cassandra_system_add_column_family_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(sde == rhs.sde))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_system_add_column_family_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_system_add_column_family_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_system_add_column_family_presult__isset {
+ _Cassandra_system_add_column_family_presult__isset() : success(false), ire(false), sde(false) {}
+ bool success;
+ bool ire;
+ bool sde;
+} _Cassandra_system_add_column_family_presult__isset;
+
+class Cassandra_system_add_column_family_presult {
+ public:
+
+
+ virtual ~Cassandra_system_add_column_family_presult() throw() {}
+
+ std::string* success;
+ InvalidRequestException ire;
+ SchemaDisagreementException sde;
+
+ _Cassandra_system_add_column_family_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_system_drop_column_family_args {
+ public:
+
+ Cassandra_system_drop_column_family_args() : column_family("") {
+ }
+
+ virtual ~Cassandra_system_drop_column_family_args() throw() {}
+
+ std::string column_family;
+
+ void __set_column_family(const std::string& val) {
+ column_family = val;
+ }
+
+ bool operator == (const Cassandra_system_drop_column_family_args & rhs) const
+ {
+ if (!(column_family == rhs.column_family))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_system_drop_column_family_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_system_drop_column_family_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_system_drop_column_family_pargs {
+ public:
+
+
+ virtual ~Cassandra_system_drop_column_family_pargs() throw() {}
+
+ const std::string* column_family;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_system_drop_column_family_result__isset {
+ _Cassandra_system_drop_column_family_result__isset() : success(false), ire(false), sde(false) {}
+ bool success;
+ bool ire;
+ bool sde;
+} _Cassandra_system_drop_column_family_result__isset;
+
+class Cassandra_system_drop_column_family_result {
+ public:
+
+ Cassandra_system_drop_column_family_result() : success("") {
+ }
+
+ virtual ~Cassandra_system_drop_column_family_result() throw() {}
+
+ std::string success;
+ InvalidRequestException ire;
+ SchemaDisagreementException sde;
+
+ _Cassandra_system_drop_column_family_result__isset __isset;
+
+ void __set_success(const std::string& val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_sde(const SchemaDisagreementException& val) {
+ sde = val;
+ }
+
+ bool operator == (const Cassandra_system_drop_column_family_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(sde == rhs.sde))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_system_drop_column_family_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_system_drop_column_family_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_system_drop_column_family_presult__isset {
+ _Cassandra_system_drop_column_family_presult__isset() : success(false), ire(false), sde(false) {}
+ bool success;
+ bool ire;
+ bool sde;
+} _Cassandra_system_drop_column_family_presult__isset;
+
+class Cassandra_system_drop_column_family_presult {
+ public:
+
+
+ virtual ~Cassandra_system_drop_column_family_presult() throw() {}
+
+ std::string* success;
+ InvalidRequestException ire;
+ SchemaDisagreementException sde;
+
+ _Cassandra_system_drop_column_family_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_system_add_keyspace_args {
+ public:
+
+ Cassandra_system_add_keyspace_args() {
+ }
+
+ virtual ~Cassandra_system_add_keyspace_args() throw() {}
+
+ KsDef ks_def;
+
+ void __set_ks_def(const KsDef& val) {
+ ks_def = val;
+ }
+
+ bool operator == (const Cassandra_system_add_keyspace_args & rhs) const
+ {
+ if (!(ks_def == rhs.ks_def))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_system_add_keyspace_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_system_add_keyspace_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_system_add_keyspace_pargs {
+ public:
+
+
+ virtual ~Cassandra_system_add_keyspace_pargs() throw() {}
+
+ const KsDef* ks_def;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_system_add_keyspace_result__isset {
+ _Cassandra_system_add_keyspace_result__isset() : success(false), ire(false), sde(false) {}
+ bool success;
+ bool ire;
+ bool sde;
+} _Cassandra_system_add_keyspace_result__isset;
+
+class Cassandra_system_add_keyspace_result {
+ public:
+
+ Cassandra_system_add_keyspace_result() : success("") {
+ }
+
+ virtual ~Cassandra_system_add_keyspace_result() throw() {}
+
+ std::string success;
+ InvalidRequestException ire;
+ SchemaDisagreementException sde;
+
+ _Cassandra_system_add_keyspace_result__isset __isset;
+
+ void __set_success(const std::string& val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_sde(const SchemaDisagreementException& val) {
+ sde = val;
+ }
+
+ bool operator == (const Cassandra_system_add_keyspace_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(sde == rhs.sde))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_system_add_keyspace_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_system_add_keyspace_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_system_add_keyspace_presult__isset {
+ _Cassandra_system_add_keyspace_presult__isset() : success(false), ire(false), sde(false) {}
+ bool success;
+ bool ire;
+ bool sde;
+} _Cassandra_system_add_keyspace_presult__isset;
+
+class Cassandra_system_add_keyspace_presult {
+ public:
+
+
+ virtual ~Cassandra_system_add_keyspace_presult() throw() {}
+
+ std::string* success;
+ InvalidRequestException ire;
+ SchemaDisagreementException sde;
+
+ _Cassandra_system_add_keyspace_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_system_drop_keyspace_args {
+ public:
+
+ Cassandra_system_drop_keyspace_args() : keyspace("") {
+ }
+
+ virtual ~Cassandra_system_drop_keyspace_args() throw() {}
+
+ std::string keyspace;
+
+ void __set_keyspace(const std::string& val) {
+ keyspace = val;
+ }
+
+ bool operator == (const Cassandra_system_drop_keyspace_args & rhs) const
+ {
+ if (!(keyspace == rhs.keyspace))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_system_drop_keyspace_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_system_drop_keyspace_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_system_drop_keyspace_pargs {
+ public:
+
+
+ virtual ~Cassandra_system_drop_keyspace_pargs() throw() {}
+
+ const std::string* keyspace;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_system_drop_keyspace_result__isset {
+ _Cassandra_system_drop_keyspace_result__isset() : success(false), ire(false), sde(false) {}
+ bool success;
+ bool ire;
+ bool sde;
+} _Cassandra_system_drop_keyspace_result__isset;
+
+class Cassandra_system_drop_keyspace_result {
+ public:
+
+ Cassandra_system_drop_keyspace_result() : success("") {
+ }
+
+ virtual ~Cassandra_system_drop_keyspace_result() throw() {}
+
+ std::string success;
+ InvalidRequestException ire;
+ SchemaDisagreementException sde;
+
+ _Cassandra_system_drop_keyspace_result__isset __isset;
+
+ void __set_success(const std::string& val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_sde(const SchemaDisagreementException& val) {
+ sde = val;
+ }
+
+ bool operator == (const Cassandra_system_drop_keyspace_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(sde == rhs.sde))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_system_drop_keyspace_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_system_drop_keyspace_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_system_drop_keyspace_presult__isset {
+ _Cassandra_system_drop_keyspace_presult__isset() : success(false), ire(false), sde(false) {}
+ bool success;
+ bool ire;
+ bool sde;
+} _Cassandra_system_drop_keyspace_presult__isset;
+
+class Cassandra_system_drop_keyspace_presult {
+ public:
+
+
+ virtual ~Cassandra_system_drop_keyspace_presult() throw() {}
+
+ std::string* success;
+ InvalidRequestException ire;
+ SchemaDisagreementException sde;
+
+ _Cassandra_system_drop_keyspace_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_system_update_keyspace_args {
+ public:
+
+ Cassandra_system_update_keyspace_args() {
+ }
+
+ virtual ~Cassandra_system_update_keyspace_args() throw() {}
+
+ KsDef ks_def;
+
+ void __set_ks_def(const KsDef& val) {
+ ks_def = val;
+ }
+
+ bool operator == (const Cassandra_system_update_keyspace_args & rhs) const
+ {
+ if (!(ks_def == rhs.ks_def))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_system_update_keyspace_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_system_update_keyspace_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_system_update_keyspace_pargs {
+ public:
+
+
+ virtual ~Cassandra_system_update_keyspace_pargs() throw() {}
+
+ const KsDef* ks_def;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_system_update_keyspace_result__isset {
+ _Cassandra_system_update_keyspace_result__isset() : success(false), ire(false), sde(false) {}
+ bool success;
+ bool ire;
+ bool sde;
+} _Cassandra_system_update_keyspace_result__isset;
+
+class Cassandra_system_update_keyspace_result {
+ public:
+
+ Cassandra_system_update_keyspace_result() : success("") {
+ }
+
+ virtual ~Cassandra_system_update_keyspace_result() throw() {}
+
+ std::string success;
+ InvalidRequestException ire;
+ SchemaDisagreementException sde;
+
+ _Cassandra_system_update_keyspace_result__isset __isset;
+
+ void __set_success(const std::string& val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_sde(const SchemaDisagreementException& val) {
+ sde = val;
+ }
+
+ bool operator == (const Cassandra_system_update_keyspace_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(sde == rhs.sde))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_system_update_keyspace_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_system_update_keyspace_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_system_update_keyspace_presult__isset {
+ _Cassandra_system_update_keyspace_presult__isset() : success(false), ire(false), sde(false) {}
+ bool success;
+ bool ire;
+ bool sde;
+} _Cassandra_system_update_keyspace_presult__isset;
+
+class Cassandra_system_update_keyspace_presult {
+ public:
+
+
+ virtual ~Cassandra_system_update_keyspace_presult() throw() {}
+
+ std::string* success;
+ InvalidRequestException ire;
+ SchemaDisagreementException sde;
+
+ _Cassandra_system_update_keyspace_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_system_update_column_family_args {
+ public:
+
+ Cassandra_system_update_column_family_args() {
+ }
+
+ virtual ~Cassandra_system_update_column_family_args() throw() {}
+
+ CfDef cf_def;
+
+ void __set_cf_def(const CfDef& val) {
+ cf_def = val;
+ }
+
+ bool operator == (const Cassandra_system_update_column_family_args & rhs) const
+ {
+ if (!(cf_def == rhs.cf_def))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_system_update_column_family_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_system_update_column_family_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_system_update_column_family_pargs {
+ public:
+
+
+ virtual ~Cassandra_system_update_column_family_pargs() throw() {}
+
+ const CfDef* cf_def;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_system_update_column_family_result__isset {
+ _Cassandra_system_update_column_family_result__isset() : success(false), ire(false), sde(false) {}
+ bool success;
+ bool ire;
+ bool sde;
+} _Cassandra_system_update_column_family_result__isset;
+
+class Cassandra_system_update_column_family_result {
+ public:
+
+ Cassandra_system_update_column_family_result() : success("") {
+ }
+
+ virtual ~Cassandra_system_update_column_family_result() throw() {}
+
+ std::string success;
+ InvalidRequestException ire;
+ SchemaDisagreementException sde;
+
+ _Cassandra_system_update_column_family_result__isset __isset;
+
+ void __set_success(const std::string& val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_sde(const SchemaDisagreementException& val) {
+ sde = val;
+ }
+
+ bool operator == (const Cassandra_system_update_column_family_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(sde == rhs.sde))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_system_update_column_family_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_system_update_column_family_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_system_update_column_family_presult__isset {
+ _Cassandra_system_update_column_family_presult__isset() : success(false), ire(false), sde(false) {}
+ bool success;
+ bool ire;
+ bool sde;
+} _Cassandra_system_update_column_family_presult__isset;
+
+class Cassandra_system_update_column_family_presult {
+ public:
+
+
+ virtual ~Cassandra_system_update_column_family_presult() throw() {}
+
+ std::string* success;
+ InvalidRequestException ire;
+ SchemaDisagreementException sde;
+
+ _Cassandra_system_update_column_family_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_execute_cql_query_args {
+ public:
+
+ Cassandra_execute_cql_query_args() : query(""), compression((Compression::type)0) {
+ }
+
+ virtual ~Cassandra_execute_cql_query_args() throw() {}
+
+ std::string query;
+ Compression::type compression;
+
+ void __set_query(const std::string& val) {
+ query = val;
+ }
+
+ void __set_compression(const Compression::type val) {
+ compression = val;
+ }
+
+ bool operator == (const Cassandra_execute_cql_query_args & rhs) const
+ {
+ if (!(query == rhs.query))
+ return false;
+ if (!(compression == rhs.compression))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_execute_cql_query_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_execute_cql_query_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_execute_cql_query_pargs {
+ public:
+
+
+ virtual ~Cassandra_execute_cql_query_pargs() throw() {}
+
+ const std::string* query;
+ const Compression::type* compression;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_execute_cql_query_result__isset {
+ _Cassandra_execute_cql_query_result__isset() : success(false), ire(false), ue(false), te(false), sde(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+ bool sde;
+} _Cassandra_execute_cql_query_result__isset;
+
+class Cassandra_execute_cql_query_result {
+ public:
+
+ Cassandra_execute_cql_query_result() {
+ }
+
+ virtual ~Cassandra_execute_cql_query_result() throw() {}
+
+ CqlResult success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+ SchemaDisagreementException sde;
+
+ _Cassandra_execute_cql_query_result__isset __isset;
+
+ void __set_success(const CqlResult& val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_ue(const UnavailableException& val) {
+ ue = val;
+ }
+
+ void __set_te(const TimedOutException& val) {
+ te = val;
+ }
+
+ void __set_sde(const SchemaDisagreementException& val) {
+ sde = val;
+ }
+
+ bool operator == (const Cassandra_execute_cql_query_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(ue == rhs.ue))
+ return false;
+ if (!(te == rhs.te))
+ return false;
+ if (!(sde == rhs.sde))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_execute_cql_query_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_execute_cql_query_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_execute_cql_query_presult__isset {
+ _Cassandra_execute_cql_query_presult__isset() : success(false), ire(false), ue(false), te(false), sde(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+ bool sde;
+} _Cassandra_execute_cql_query_presult__isset;
+
+class Cassandra_execute_cql_query_presult {
+ public:
+
+
+ virtual ~Cassandra_execute_cql_query_presult() throw() {}
+
+ CqlResult* success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+ SchemaDisagreementException sde;
+
+ _Cassandra_execute_cql_query_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_prepare_cql_query_args {
+ public:
+
+ Cassandra_prepare_cql_query_args() : query(""), compression((Compression::type)0) {
+ }
+
+ virtual ~Cassandra_prepare_cql_query_args() throw() {}
+
+ std::string query;
+ Compression::type compression;
+
+ void __set_query(const std::string& val) {
+ query = val;
+ }
+
+ void __set_compression(const Compression::type val) {
+ compression = val;
+ }
+
+ bool operator == (const Cassandra_prepare_cql_query_args & rhs) const
+ {
+ if (!(query == rhs.query))
+ return false;
+ if (!(compression == rhs.compression))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_prepare_cql_query_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_prepare_cql_query_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_prepare_cql_query_pargs {
+ public:
+
+
+ virtual ~Cassandra_prepare_cql_query_pargs() throw() {}
+
+ const std::string* query;
+ const Compression::type* compression;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_prepare_cql_query_result__isset {
+ _Cassandra_prepare_cql_query_result__isset() : success(false), ire(false) {}
+ bool success;
+ bool ire;
+} _Cassandra_prepare_cql_query_result__isset;
+
+class Cassandra_prepare_cql_query_result {
+ public:
+
+ Cassandra_prepare_cql_query_result() {
+ }
+
+ virtual ~Cassandra_prepare_cql_query_result() throw() {}
+
+ CqlPreparedResult success;
+ InvalidRequestException ire;
+
+ _Cassandra_prepare_cql_query_result__isset __isset;
+
+ void __set_success(const CqlPreparedResult& val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ bool operator == (const Cassandra_prepare_cql_query_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_prepare_cql_query_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_prepare_cql_query_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_prepare_cql_query_presult__isset {
+ _Cassandra_prepare_cql_query_presult__isset() : success(false), ire(false) {}
+ bool success;
+ bool ire;
+} _Cassandra_prepare_cql_query_presult__isset;
+
+class Cassandra_prepare_cql_query_presult {
+ public:
+
+
+ virtual ~Cassandra_prepare_cql_query_presult() throw() {}
+
+ CqlPreparedResult* success;
+ InvalidRequestException ire;
+
+ _Cassandra_prepare_cql_query_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_execute_prepared_cql_query_args {
+ public:
+
+ Cassandra_execute_prepared_cql_query_args() : itemId(0) {
+ }
+
+ virtual ~Cassandra_execute_prepared_cql_query_args() throw() {}
+
+ int32_t itemId;
+ std::vector<std::string> values;
+
+ void __set_itemId(const int32_t val) {
+ itemId = val;
+ }
+
+ void __set_values(const std::vector<std::string> & val) {
+ values = val;
+ }
+
+ bool operator == (const Cassandra_execute_prepared_cql_query_args & rhs) const
+ {
+ if (!(itemId == rhs.itemId))
+ return false;
+ if (!(values == rhs.values))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_execute_prepared_cql_query_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_execute_prepared_cql_query_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_execute_prepared_cql_query_pargs {
+ public:
+
+
+ virtual ~Cassandra_execute_prepared_cql_query_pargs() throw() {}
+
+ const int32_t* itemId;
+ const std::vector<std::string> * values;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_execute_prepared_cql_query_result__isset {
+ _Cassandra_execute_prepared_cql_query_result__isset() : success(false), ire(false), ue(false), te(false), sde(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+ bool sde;
+} _Cassandra_execute_prepared_cql_query_result__isset;
+
+class Cassandra_execute_prepared_cql_query_result {
+ public:
+
+ Cassandra_execute_prepared_cql_query_result() {
+ }
+
+ virtual ~Cassandra_execute_prepared_cql_query_result() throw() {}
+
+ CqlResult success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+ SchemaDisagreementException sde;
+
+ _Cassandra_execute_prepared_cql_query_result__isset __isset;
+
+ void __set_success(const CqlResult& val) {
+ success = val;
+ }
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ void __set_ue(const UnavailableException& val) {
+ ue = val;
+ }
+
+ void __set_te(const TimedOutException& val) {
+ te = val;
+ }
+
+ void __set_sde(const SchemaDisagreementException& val) {
+ sde = val;
+ }
+
+ bool operator == (const Cassandra_execute_prepared_cql_query_result & rhs) const
+ {
+ if (!(success == rhs.success))
+ return false;
+ if (!(ire == rhs.ire))
+ return false;
+ if (!(ue == rhs.ue))
+ return false;
+ if (!(te == rhs.te))
+ return false;
+ if (!(sde == rhs.sde))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_execute_prepared_cql_query_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_execute_prepared_cql_query_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_execute_prepared_cql_query_presult__isset {
+ _Cassandra_execute_prepared_cql_query_presult__isset() : success(false), ire(false), ue(false), te(false), sde(false) {}
+ bool success;
+ bool ire;
+ bool ue;
+ bool te;
+ bool sde;
+} _Cassandra_execute_prepared_cql_query_presult__isset;
+
+class Cassandra_execute_prepared_cql_query_presult {
+ public:
+
+
+ virtual ~Cassandra_execute_prepared_cql_query_presult() throw() {}
+
+ CqlResult* success;
+ InvalidRequestException ire;
+ UnavailableException ue;
+ TimedOutException te;
+ SchemaDisagreementException sde;
+
+ _Cassandra_execute_prepared_cql_query_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class Cassandra_set_cql_version_args {
+ public:
+
+ Cassandra_set_cql_version_args() : version("") {
+ }
+
+ virtual ~Cassandra_set_cql_version_args() throw() {}
+
+ std::string version;
+
+ void __set_version(const std::string& val) {
+ version = val;
+ }
+
+ bool operator == (const Cassandra_set_cql_version_args & rhs) const
+ {
+ if (!(version == rhs.version))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_set_cql_version_args &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_set_cql_version_args & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class Cassandra_set_cql_version_pargs {
+ public:
+
+
+ virtual ~Cassandra_set_cql_version_pargs() throw() {}
+
+ const std::string* version;
+
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_set_cql_version_result__isset {
+ _Cassandra_set_cql_version_result__isset() : ire(false) {}
+ bool ire;
+} _Cassandra_set_cql_version_result__isset;
+
+class Cassandra_set_cql_version_result {
+ public:
+
+ Cassandra_set_cql_version_result() {
+ }
+
+ virtual ~Cassandra_set_cql_version_result() throw() {}
+
+ InvalidRequestException ire;
+
+ _Cassandra_set_cql_version_result__isset __isset;
+
+ void __set_ire(const InvalidRequestException& val) {
+ ire = val;
+ }
+
+ bool operator == (const Cassandra_set_cql_version_result & rhs) const
+ {
+ if (!(ire == rhs.ire))
+ return false;
+ return true;
+ }
+ bool operator != (const Cassandra_set_cql_version_result &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Cassandra_set_cql_version_result & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Cassandra_set_cql_version_presult__isset {
+ _Cassandra_set_cql_version_presult__isset() : ire(false) {}
+ bool ire;
+} _Cassandra_set_cql_version_presult__isset;
+
+class Cassandra_set_cql_version_presult {
+ public:
+
+
+ virtual ~Cassandra_set_cql_version_presult() throw() {}
+
+ InvalidRequestException ire;
+
+ _Cassandra_set_cql_version_presult__isset __isset;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+class CassandraClient : virtual public CassandraIf {
+ public:
+ CassandraClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) :
+ piprot_(prot),
+ poprot_(prot) {
+ iprot_ = prot.get();
+ oprot_ = prot.get();
+ }
+ CassandraClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> iprot, boost::shared_ptr< ::apache::thrift::protocol::TProtocol> oprot) :
+ piprot_(iprot),
+ poprot_(oprot) {
+ iprot_ = iprot.get();
+ oprot_ = oprot.get();
+ }
+ boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getInputProtocol() {
+ return piprot_;
+ }
+ boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getOutputProtocol() {
+ return poprot_;
+ }
+ void login(const AuthenticationRequest& auth_request);
+ void send_login(const AuthenticationRequest& auth_request);
+ void recv_login();
+ void set_keyspace(const std::string& keyspace);
+ void send_set_keyspace(const std::string& keyspace);
+ void recv_set_keyspace();
+ void get(ColumnOrSuperColumn& _return, const std::string& key, const ColumnPath& column_path, const ConsistencyLevel::type consistency_level);
+ void send_get(const std::string& key, const ColumnPath& column_path, const ConsistencyLevel::type consistency_level);
+ void recv_get(ColumnOrSuperColumn& _return);
+ void get_slice(std::vector<ColumnOrSuperColumn> & _return, const std::string& key, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level);
+ void send_get_slice(const std::string& key, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level);
+ void recv_get_slice(std::vector<ColumnOrSuperColumn> & _return);
+ int32_t get_count(const std::string& key, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level);
+ void send_get_count(const std::string& key, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level);
+ int32_t recv_get_count();
+ void multiget_slice(std::map<std::string, std::vector<ColumnOrSuperColumn> > & _return, const std::vector<std::string> & keys, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level);
+ void send_multiget_slice(const std::vector<std::string> & keys, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level);
+ void recv_multiget_slice(std::map<std::string, std::vector<ColumnOrSuperColumn> > & _return);
+ void multiget_count(std::map<std::string, int32_t> & _return, const std::vector<std::string> & keys, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level);
+ void send_multiget_count(const std::vector<std::string> & keys, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level);
+ void recv_multiget_count(std::map<std::string, int32_t> & _return);
+ void get_range_slices(std::vector<KeySlice> & _return, const ColumnParent& column_parent, const SlicePredicate& predicate, const KeyRange& range, const ConsistencyLevel::type consistency_level);
+ void send_get_range_slices(const ColumnParent& column_parent, const SlicePredicate& predicate, const KeyRange& range, const ConsistencyLevel::type consistency_level);
+ void recv_get_range_slices(std::vector<KeySlice> & _return);
+ void get_paged_slice(std::vector<KeySlice> & _return, const std::string& column_family, const KeyRange& range, const std::string& start_column, const ConsistencyLevel::type consistency_level);
+ void send_get_paged_slice(const std::string& column_family, const KeyRange& range, const std::string& start_column, const ConsistencyLevel::type consistency_level);
+ void recv_get_paged_slice(std::vector<KeySlice> & _return);
+ void get_indexed_slices(std::vector<KeySlice> & _return, const ColumnParent& column_parent, const IndexClause& index_clause, const SlicePredicate& column_predicate, const ConsistencyLevel::type consistency_level);
+ void send_get_indexed_slices(const ColumnParent& column_parent, const IndexClause& index_clause, const SlicePredicate& column_predicate, const ConsistencyLevel::type consistency_level);
+ void recv_get_indexed_slices(std::vector<KeySlice> & _return);
+ void insert(const std::string& key, const ColumnParent& column_parent, const Column& column, const ConsistencyLevel::type consistency_level);
+ void send_insert(const std::string& key, const ColumnParent& column_parent, const Column& column, const ConsistencyLevel::type consistency_level);
+ void recv_insert();
+ void add(const std::string& key, const ColumnParent& column_parent, const CounterColumn& column, const ConsistencyLevel::type consistency_level);
+ void send_add(const std::string& key, const ColumnParent& column_parent, const CounterColumn& column, const ConsistencyLevel::type consistency_level);
+ void recv_add();
+ void remove(const std::string& key, const ColumnPath& column_path, const int64_t timestamp, const ConsistencyLevel::type consistency_level);
+ void send_remove(const std::string& key, const ColumnPath& column_path, const int64_t timestamp, const ConsistencyLevel::type consistency_level);
+ void recv_remove();
+ void remove_counter(const std::string& key, const ColumnPath& path, const ConsistencyLevel::type consistency_level);
+ void send_remove_counter(const std::string& key, const ColumnPath& path, const ConsistencyLevel::type consistency_level);
+ void recv_remove_counter();
+ void batch_mutate(const std::map<std::string, std::map<std::string, std::vector<Mutation> > > & mutation_map, const ConsistencyLevel::type consistency_level);
+ void send_batch_mutate(const std::map<std::string, std::map<std::string, std::vector<Mutation> > > & mutation_map, const ConsistencyLevel::type consistency_level);
+ void recv_batch_mutate();
+ void truncate(const std::string& cfname);
+ void send_truncate(const std::string& cfname);
+ void recv_truncate();
+ void describe_schema_versions(std::map<std::string, std::vector<std::string> > & _return);
+ void send_describe_schema_versions();
+ void recv_describe_schema_versions(std::map<std::string, std::vector<std::string> > & _return);
+ void describe_keyspaces(std::vector<KsDef> & _return);
+ void send_describe_keyspaces();
+ void recv_describe_keyspaces(std::vector<KsDef> & _return);
+ void describe_cluster_name(std::string& _return);
+ void send_describe_cluster_name();
+ void recv_describe_cluster_name(std::string& _return);
+ void describe_version(std::string& _return);
+ void send_describe_version();
+ void recv_describe_version(std::string& _return);
+ void describe_ring(std::vector<TokenRange> & _return, const std::string& keyspace);
+ void send_describe_ring(const std::string& keyspace);
+ void recv_describe_ring(std::vector<TokenRange> & _return);
+ void describe_token_map(std::map<std::string, std::string> & _return);
+ void send_describe_token_map();
+ void recv_describe_token_map(std::map<std::string, std::string> & _return);
+ void describe_partitioner(std::string& _return);
+ void send_describe_partitioner();
+ void recv_describe_partitioner(std::string& _return);
+ void describe_snitch(std::string& _return);
+ void send_describe_snitch();
+ void recv_describe_snitch(std::string& _return);
+ void describe_keyspace(KsDef& _return, const std::string& keyspace);
+ void send_describe_keyspace(const std::string& keyspace);
+ void recv_describe_keyspace(KsDef& _return);
+ void describe_splits(std::vector<std::string> & _return, const std::string& cfName, const std::string& start_token, const std::string& end_token, const int32_t keys_per_split);
+ void send_describe_splits(const std::string& cfName, const std::string& start_token, const std::string& end_token, const int32_t keys_per_split);
+ void recv_describe_splits(std::vector<std::string> & _return);
+ void system_add_column_family(std::string& _return, const CfDef& cf_def);
+ void send_system_add_column_family(const CfDef& cf_def);
+ void recv_system_add_column_family(std::string& _return);
+ void system_drop_column_family(std::string& _return, const std::string& column_family);
+ void send_system_drop_column_family(const std::string& column_family);
+ void recv_system_drop_column_family(std::string& _return);
+ void system_add_keyspace(std::string& _return, const KsDef& ks_def);
+ void send_system_add_keyspace(const KsDef& ks_def);
+ void recv_system_add_keyspace(std::string& _return);
+ void system_drop_keyspace(std::string& _return, const std::string& keyspace);
+ void send_system_drop_keyspace(const std::string& keyspace);
+ void recv_system_drop_keyspace(std::string& _return);
+ void system_update_keyspace(std::string& _return, const KsDef& ks_def);
+ void send_system_update_keyspace(const KsDef& ks_def);
+ void recv_system_update_keyspace(std::string& _return);
+ void system_update_column_family(std::string& _return, const CfDef& cf_def);
+ void send_system_update_column_family(const CfDef& cf_def);
+ void recv_system_update_column_family(std::string& _return);
+ void execute_cql_query(CqlResult& _return, const std::string& query, const Compression::type compression);
+ void send_execute_cql_query(const std::string& query, const Compression::type compression);
+ void recv_execute_cql_query(CqlResult& _return);
+ void prepare_cql_query(CqlPreparedResult& _return, const std::string& query, const Compression::type compression);
+ void send_prepare_cql_query(const std::string& query, const Compression::type compression);
+ void recv_prepare_cql_query(CqlPreparedResult& _return);
+ void execute_prepared_cql_query(CqlResult& _return, const int32_t itemId, const std::vector<std::string> & values);
+ void send_execute_prepared_cql_query(const int32_t itemId, const std::vector<std::string> & values);
+ void recv_execute_prepared_cql_query(CqlResult& _return);
+ void set_cql_version(const std::string& version);
+ void send_set_cql_version(const std::string& version);
+ void recv_set_cql_version();
+ protected:
+ boost::shared_ptr< ::apache::thrift::protocol::TProtocol> piprot_;
+ boost::shared_ptr< ::apache::thrift::protocol::TProtocol> poprot_;
+ ::apache::thrift::protocol::TProtocol* iprot_;
+ ::apache::thrift::protocol::TProtocol* oprot_;
+};
+
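`CassandraClient` pairs each interface method with explicit `send_`/`recv_` halves, so a caller can push several requests onto one connection before collecting replies. A minimal sketch of wiring the client to a server — the endpoint 127.0.0.1:9160 and the framed transport are assumptions (9160 was merely the conventional Cassandra Thrift port); only the class names come from this header:

  #include <protocol/TBinaryProtocol.h>
  #include <transport/TSocket.h>
  #include <transport/TBufferTransports.h>

  using namespace ::apache::thrift::transport;
  using namespace ::apache::thrift::protocol;

  boost::shared_ptr<TSocket> socket(new TSocket("127.0.0.1", 9160));
  boost::shared_ptr<TTransport> transport(new TFramedTransport(socket));
  boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));

  CassandraClient client(protocol);
  transport->open();
  client.set_keyspace("my_keyspace");   // hypothetical keyspace name
  CqlResult result;
  client.execute_cql_query(result, "SELECT * FROM cf", Compression::NONE);
  transport->close();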
+class CassandraProcessor : public ::apache::thrift::TProcessor {
+ protected:
+ boost::shared_ptr<CassandraIf> iface_;
+ virtual bool process_fn(::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, std::string& fname, int32_t seqid, void* callContext);
+ private:
+ std::map<std::string, void (CassandraProcessor::*)(int32_t, ::apache::thrift::protocol::TProtocol*, ::apache::thrift::protocol::TProtocol*, void*)> processMap_;
+ void process_login(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_set_keyspace(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_get(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_get_slice(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_get_count(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_multiget_slice(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_multiget_count(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_get_range_slices(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_get_paged_slice(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_get_indexed_slices(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_insert(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_add(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_remove(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_remove_counter(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_batch_mutate(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_truncate(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_describe_schema_versions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_describe_keyspaces(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_describe_cluster_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_describe_version(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_describe_ring(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_describe_token_map(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_describe_partitioner(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_describe_snitch(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_describe_keyspace(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_describe_splits(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_system_add_column_family(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_system_drop_column_family(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_system_add_keyspace(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_system_drop_keyspace(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_system_update_keyspace(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_system_update_column_family(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_execute_cql_query(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_prepare_cql_query(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_execute_prepared_cql_query(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ void process_set_cql_version(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+ public:
+ CassandraProcessor(boost::shared_ptr<CassandraIf> iface) :
+ iface_(iface) {
+ processMap_["login"] = &CassandraProcessor::process_login;
+ processMap_["set_keyspace"] = &CassandraProcessor::process_set_keyspace;
+ processMap_["get"] = &CassandraProcessor::process_get;
+ processMap_["get_slice"] = &CassandraProcessor::process_get_slice;
+ processMap_["get_count"] = &CassandraProcessor::process_get_count;
+ processMap_["multiget_slice"] = &CassandraProcessor::process_multiget_slice;
+ processMap_["multiget_count"] = &CassandraProcessor::process_multiget_count;
+ processMap_["get_range_slices"] = &CassandraProcessor::process_get_range_slices;
+ processMap_["get_paged_slice"] = &CassandraProcessor::process_get_paged_slice;
+ processMap_["get_indexed_slices"] = &CassandraProcessor::process_get_indexed_slices;
+ processMap_["insert"] = &CassandraProcessor::process_insert;
+ processMap_["add"] = &CassandraProcessor::process_add;
+ processMap_["remove"] = &CassandraProcessor::process_remove;
+ processMap_["remove_counter"] = &CassandraProcessor::process_remove_counter;
+ processMap_["batch_mutate"] = &CassandraProcessor::process_batch_mutate;
+ processMap_["truncate"] = &CassandraProcessor::process_truncate;
+ processMap_["describe_schema_versions"] = &CassandraProcessor::process_describe_schema_versions;
+ processMap_["describe_keyspaces"] = &CassandraProcessor::process_describe_keyspaces;
+ processMap_["describe_cluster_name"] = &CassandraProcessor::process_describe_cluster_name;
+ processMap_["describe_version"] = &CassandraProcessor::process_describe_version;
+ processMap_["describe_ring"] = &CassandraProcessor::process_describe_ring;
+ processMap_["describe_token_map"] = &CassandraProcessor::process_describe_token_map;
+ processMap_["describe_partitioner"] = &CassandraProcessor::process_describe_partitioner;
+ processMap_["describe_snitch"] = &CassandraProcessor::process_describe_snitch;
+ processMap_["describe_keyspace"] = &CassandraProcessor::process_describe_keyspace;
+ processMap_["describe_splits"] = &CassandraProcessor::process_describe_splits;
+ processMap_["system_add_column_family"] = &CassandraProcessor::process_system_add_column_family;
+ processMap_["system_drop_column_family"] = &CassandraProcessor::process_system_drop_column_family;
+ processMap_["system_add_keyspace"] = &CassandraProcessor::process_system_add_keyspace;
+ processMap_["system_drop_keyspace"] = &CassandraProcessor::process_system_drop_keyspace;
+ processMap_["system_update_keyspace"] = &CassandraProcessor::process_system_update_keyspace;
+ processMap_["system_update_column_family"] = &CassandraProcessor::process_system_update_column_family;
+ processMap_["execute_cql_query"] = &CassandraProcessor::process_execute_cql_query;
+ processMap_["prepare_cql_query"] = &CassandraProcessor::process_prepare_cql_query;
+ processMap_["execute_prepared_cql_query"] = &CassandraProcessor::process_execute_prepared_cql_query;
+ processMap_["set_cql_version"] = &CassandraProcessor::process_set_cql_version;
+ }
+
+ virtual bool process(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> piprot, boost::shared_ptr< ::apache::thrift::protocol::TProtocol> poprot, void* callContext);
+ virtual ~CassandraProcessor() {}
+};
+
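The processor's constructor fills `processMap_` with one member-function pointer per RPC name, so `process_fn` needs nothing more than a map lookup to dispatch an incoming call. A sketch of that dispatch under the same assumptions — the generated .cpp carries the real body, including the skip-and-report path for unknown methods:

  typedef void (CassandraProcessor::*ProcessFn)(
      int32_t, ::apache::thrift::protocol::TProtocol*,
      ::apache::thrift::protocol::TProtocol*, void*);
  std::map<std::string, ProcessFn>::iterator it = processMap_.find(fname);
  if (it == processMap_.end()) {
    // unknown method: the generated code skips the payload and replies
    // with a TApplicationException(UNKNOWN_METHOD)
  } else {
    (this->*(it->second))(seqid, iprot, oprot, callContext);
  }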
+class CassandraProcessorFactory : public ::apache::thrift::TProcessorFactory {
+ public:
+ CassandraProcessorFactory(const ::boost::shared_ptr< CassandraIfFactory >& handlerFactory) :
+ handlerFactory_(handlerFactory) {}
+
+ ::boost::shared_ptr< ::apache::thrift::TProcessor > getProcessor(const ::apache::thrift::TConnectionInfo& connInfo);
+
+ protected:
+ ::boost::shared_ptr< CassandraIfFactory > handlerFactory_;
+};
+
+class CassandraMultiface : virtual public CassandraIf {
+ public:
+ CassandraMultiface(std::vector<boost::shared_ptr<CassandraIf> >& ifaces) : ifaces_(ifaces) {
+ }
+ virtual ~CassandraMultiface() {}
+ protected:
+ std::vector<boost::shared_ptr<CassandraIf> > ifaces_;
+ CassandraMultiface() {}
+ void add(boost::shared_ptr<CassandraIf> iface) {
+ ifaces_.push_back(iface);
+ }
+ public:
+ void login(const AuthenticationRequest& auth_request) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ ifaces_[i]->login(auth_request);
+ }
+ }
+
+ void set_keyspace(const std::string& keyspace) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ ifaces_[i]->set_keyspace(keyspace);
+ }
+ }
+
+ void get(ColumnOrSuperColumn& _return, const std::string& key, const ColumnPath& column_path, const ConsistencyLevel::type consistency_level) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->get(_return, key, column_path, consistency_level);
+ return;
+ } else {
+ ifaces_[i]->get(_return, key, column_path, consistency_level);
+ }
+ }
+ }
+
+ void get_slice(std::vector<ColumnOrSuperColumn> & _return, const std::string& key, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->get_slice(_return, key, column_parent, predicate, consistency_level);
+ return;
+ } else {
+ ifaces_[i]->get_slice(_return, key, column_parent, predicate, consistency_level);
+ }
+ }
+ }
+
+ int32_t get_count(const std::string& key, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ return ifaces_[i]->get_count(key, column_parent, predicate, consistency_level);
+ } else {
+ ifaces_[i]->get_count(key, column_parent, predicate, consistency_level);
+ }
+ }
+    return 0xDEADBEEF; // unreachable when ifaces_ is non-empty; sentinel silences the missing-return warning (psergey)
+ }
+
+ void multiget_slice(std::map<std::string, std::vector<ColumnOrSuperColumn> > & _return, const std::vector<std::string> & keys, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->multiget_slice(_return, keys, column_parent, predicate, consistency_level);
+ return;
+ } else {
+ ifaces_[i]->multiget_slice(_return, keys, column_parent, predicate, consistency_level);
+ }
+ }
+ }
+
+ void multiget_count(std::map<std::string, int32_t> & _return, const std::vector<std::string> & keys, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->multiget_count(_return, keys, column_parent, predicate, consistency_level);
+ return;
+ } else {
+ ifaces_[i]->multiget_count(_return, keys, column_parent, predicate, consistency_level);
+ }
+ }
+ }
+
+ void get_range_slices(std::vector<KeySlice> & _return, const ColumnParent& column_parent, const SlicePredicate& predicate, const KeyRange& range, const ConsistencyLevel::type consistency_level) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->get_range_slices(_return, column_parent, predicate, range, consistency_level);
+ return;
+ } else {
+ ifaces_[i]->get_range_slices(_return, column_parent, predicate, range, consistency_level);
+ }
+ }
+ }
+
+ void get_paged_slice(std::vector<KeySlice> & _return, const std::string& column_family, const KeyRange& range, const std::string& start_column, const ConsistencyLevel::type consistency_level) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->get_paged_slice(_return, column_family, range, start_column, consistency_level);
+ return;
+ } else {
+ ifaces_[i]->get_paged_slice(_return, column_family, range, start_column, consistency_level);
+ }
+ }
+ }
+
+ void get_indexed_slices(std::vector<KeySlice> & _return, const ColumnParent& column_parent, const IndexClause& index_clause, const SlicePredicate& column_predicate, const ConsistencyLevel::type consistency_level) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->get_indexed_slices(_return, column_parent, index_clause, column_predicate, consistency_level);
+ return;
+ } else {
+ ifaces_[i]->get_indexed_slices(_return, column_parent, index_clause, column_predicate, consistency_level);
+ }
+ }
+ }
+
+ void insert(const std::string& key, const ColumnParent& column_parent, const Column& column, const ConsistencyLevel::type consistency_level) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ ifaces_[i]->insert(key, column_parent, column, consistency_level);
+ }
+ }
+
+ void add(const std::string& key, const ColumnParent& column_parent, const CounterColumn& column, const ConsistencyLevel::type consistency_level) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ ifaces_[i]->add(key, column_parent, column, consistency_level);
+ }
+ }
+
+ void remove(const std::string& key, const ColumnPath& column_path, const int64_t timestamp, const ConsistencyLevel::type consistency_level) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ ifaces_[i]->remove(key, column_path, timestamp, consistency_level);
+ }
+ }
+
+ void remove_counter(const std::string& key, const ColumnPath& path, const ConsistencyLevel::type consistency_level) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ ifaces_[i]->remove_counter(key, path, consistency_level);
+ }
+ }
+
+ void batch_mutate(const std::map<std::string, std::map<std::string, std::vector<Mutation> > > & mutation_map, const ConsistencyLevel::type consistency_level) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ ifaces_[i]->batch_mutate(mutation_map, consistency_level);
+ }
+ }
+
+ void truncate(const std::string& cfname) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ ifaces_[i]->truncate(cfname);
+ }
+ }
+
+ void describe_schema_versions(std::map<std::string, std::vector<std::string> > & _return) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->describe_schema_versions(_return);
+ return;
+ } else {
+ ifaces_[i]->describe_schema_versions(_return);
+ }
+ }
+ }
+
+ void describe_keyspaces(std::vector<KsDef> & _return) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->describe_keyspaces(_return);
+ return;
+ } else {
+ ifaces_[i]->describe_keyspaces(_return);
+ }
+ }
+ }
+
+ void describe_cluster_name(std::string& _return) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->describe_cluster_name(_return);
+ return;
+ } else {
+ ifaces_[i]->describe_cluster_name(_return);
+ }
+ }
+ }
+
+ void describe_version(std::string& _return) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->describe_version(_return);
+ return;
+ } else {
+ ifaces_[i]->describe_version(_return);
+ }
+ }
+ }
+
+ void describe_ring(std::vector<TokenRange> & _return, const std::string& keyspace) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->describe_ring(_return, keyspace);
+ return;
+ } else {
+ ifaces_[i]->describe_ring(_return, keyspace);
+ }
+ }
+ }
+
+ void describe_token_map(std::map<std::string, std::string> & _return) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->describe_token_map(_return);
+ return;
+ } else {
+ ifaces_[i]->describe_token_map(_return);
+ }
+ }
+ }
+
+ void describe_partitioner(std::string& _return) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->describe_partitioner(_return);
+ return;
+ } else {
+ ifaces_[i]->describe_partitioner(_return);
+ }
+ }
+ }
+
+ void describe_snitch(std::string& _return) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->describe_snitch(_return);
+ return;
+ } else {
+ ifaces_[i]->describe_snitch(_return);
+ }
+ }
+ }
+
+ void describe_keyspace(KsDef& _return, const std::string& keyspace) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->describe_keyspace(_return, keyspace);
+ return;
+ } else {
+ ifaces_[i]->describe_keyspace(_return, keyspace);
+ }
+ }
+ }
+
+ void describe_splits(std::vector<std::string> & _return, const std::string& cfName, const std::string& start_token, const std::string& end_token, const int32_t keys_per_split) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->describe_splits(_return, cfName, start_token, end_token, keys_per_split);
+ return;
+ } else {
+ ifaces_[i]->describe_splits(_return, cfName, start_token, end_token, keys_per_split);
+ }
+ }
+ }
+
+ void system_add_column_family(std::string& _return, const CfDef& cf_def) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->system_add_column_family(_return, cf_def);
+ return;
+ } else {
+ ifaces_[i]->system_add_column_family(_return, cf_def);
+ }
+ }
+ }
+
+ void system_drop_column_family(std::string& _return, const std::string& column_family) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->system_drop_column_family(_return, column_family);
+ return;
+ } else {
+ ifaces_[i]->system_drop_column_family(_return, column_family);
+ }
+ }
+ }
+
+ void system_add_keyspace(std::string& _return, const KsDef& ks_def) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->system_add_keyspace(_return, ks_def);
+ return;
+ } else {
+ ifaces_[i]->system_add_keyspace(_return, ks_def);
+ }
+ }
+ }
+
+ void system_drop_keyspace(std::string& _return, const std::string& keyspace) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->system_drop_keyspace(_return, keyspace);
+ return;
+ } else {
+ ifaces_[i]->system_drop_keyspace(_return, keyspace);
+ }
+ }
+ }
+
+ void system_update_keyspace(std::string& _return, const KsDef& ks_def) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->system_update_keyspace(_return, ks_def);
+ return;
+ } else {
+ ifaces_[i]->system_update_keyspace(_return, ks_def);
+ }
+ }
+ }
+
+ void system_update_column_family(std::string& _return, const CfDef& cf_def) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->system_update_column_family(_return, cf_def);
+ return;
+ } else {
+ ifaces_[i]->system_update_column_family(_return, cf_def);
+ }
+ }
+ }
+
+ void execute_cql_query(CqlResult& _return, const std::string& query, const Compression::type compression) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->execute_cql_query(_return, query, compression);
+ return;
+ } else {
+ ifaces_[i]->execute_cql_query(_return, query, compression);
+ }
+ }
+ }
+
+ void prepare_cql_query(CqlPreparedResult& _return, const std::string& query, const Compression::type compression) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->prepare_cql_query(_return, query, compression);
+ return;
+ } else {
+ ifaces_[i]->prepare_cql_query(_return, query, compression);
+ }
+ }
+ }
+
+ void execute_prepared_cql_query(CqlResult& _return, const int32_t itemId, const std::vector<std::string> & values) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ if (i == sz - 1) {
+ ifaces_[i]->execute_prepared_cql_query(_return, itemId, values);
+ return;
+ } else {
+ ifaces_[i]->execute_prepared_cql_query(_return, itemId, values);
+ }
+ }
+ }
+
+ void set_cql_version(const std::string& version) {
+ size_t sz = ifaces_.size();
+ for (size_t i = 0; i < sz; ++i) {
+ ifaces_[i]->set_cql_version(version);
+ }
+ }
+
+};
+
+}}} // namespace
+
+#endif
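Note that in each forwarder above the `else` branch is identical to the `if` branch; the split exists only so the call on the last registered handler can `return` immediately. Every handler therefore runs on every call, and for methods with an out-parameter the `_return` from the final interface overwrites all earlier results. A minimal sketch of wiring two handlers, assuming the Thrift-0.8-style `CassandraMultiface` class exposes the usual vector-taking constructor (the `CassandraHandler` type is the skeleton from the next file; both handler slots here are illustrative):

#include "Cassandra.h"
#include <string>
#include <vector>

using namespace ::org::apache::cassandra;

int main() {
  std::vector<boost::shared_ptr<CassandraIf> > handlers;
  handlers.push_back(boost::shared_ptr<CassandraIf>(new CassandraHandler())); // runs first, result discarded
  handlers.push_back(boost::shared_ptr<CassandraIf>(new CassandraHandler())); // runs last, its _return wins
  CassandraMultiface multi(handlers);

  std::string version;
  multi.describe_version(version); // every handler is invoked; 'version' holds the last answer
  return 0;
}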
diff --git a/storage/cassandra/gen-cpp/Cassandra_server.skeleton.cpp b/storage/cassandra/gen-cpp/Cassandra_server.skeleton.cpp
new file mode 100644
index 00000000000..4d4e489ef4d
--- /dev/null
+++ b/storage/cassandra/gen-cpp/Cassandra_server.skeleton.cpp
@@ -0,0 +1,219 @@
+// This autogenerated skeleton file illustrates how to build a server.
+// You should copy it to another filename to avoid overwriting it.
+
+#include "Cassandra.h"
+#include <protocol/TBinaryProtocol.h>
+#include <server/TSimpleServer.h>
+#include <transport/TServerSocket.h>
+#include <transport/TBufferTransports.h>
+
+using namespace ::apache::thrift;
+using namespace ::apache::thrift::protocol;
+using namespace ::apache::thrift::transport;
+using namespace ::apache::thrift::server;
+
+using boost::shared_ptr;
+
+using namespace ::org::apache::cassandra;
+
+class CassandraHandler : virtual public CassandraIf {
+ public:
+ CassandraHandler() {
+ // Your initialization goes here
+ }
+
+ void login(const AuthenticationRequest& auth_request) {
+ // Your implementation goes here
+ printf("login\n");
+ }
+
+ void set_keyspace(const std::string& keyspace) {
+ // Your implementation goes here
+ printf("set_keyspace\n");
+ }
+
+ void get(ColumnOrSuperColumn& _return, const std::string& key, const ColumnPath& column_path, const ConsistencyLevel::type consistency_level) {
+ // Your implementation goes here
+ printf("get\n");
+ }
+
+ void get_slice(std::vector<ColumnOrSuperColumn> & _return, const std::string& key, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level) {
+ // Your implementation goes here
+ printf("get_slice\n");
+ }
+
+ int32_t get_count(const std::string& key, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level) {
+    // Your implementation goes here
+    printf("get_count\n");
+    return 0; // placeholder: get_count is non-void, so falling off the end is undefined behavior
+  }
+
+ void multiget_slice(std::map<std::string, std::vector<ColumnOrSuperColumn> > & _return, const std::vector<std::string> & keys, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level) {
+ // Your implementation goes here
+ printf("multiget_slice\n");
+ }
+
+ void multiget_count(std::map<std::string, int32_t> & _return, const std::vector<std::string> & keys, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level) {
+ // Your implementation goes here
+ printf("multiget_count\n");
+ }
+
+ void get_range_slices(std::vector<KeySlice> & _return, const ColumnParent& column_parent, const SlicePredicate& predicate, const KeyRange& range, const ConsistencyLevel::type consistency_level) {
+ // Your implementation goes here
+ printf("get_range_slices\n");
+ }
+
+ void get_paged_slice(std::vector<KeySlice> & _return, const std::string& column_family, const KeyRange& range, const std::string& start_column, const ConsistencyLevel::type consistency_level) {
+ // Your implementation goes here
+ printf("get_paged_slice\n");
+ }
+
+ void get_indexed_slices(std::vector<KeySlice> & _return, const ColumnParent& column_parent, const IndexClause& index_clause, const SlicePredicate& column_predicate, const ConsistencyLevel::type consistency_level) {
+ // Your implementation goes here
+ printf("get_indexed_slices\n");
+ }
+
+ void insert(const std::string& key, const ColumnParent& column_parent, const Column& column, const ConsistencyLevel::type consistency_level) {
+ // Your implementation goes here
+ printf("insert\n");
+ }
+
+ void add(const std::string& key, const ColumnParent& column_parent, const CounterColumn& column, const ConsistencyLevel::type consistency_level) {
+ // Your implementation goes here
+ printf("add\n");
+ }
+
+ void remove(const std::string& key, const ColumnPath& column_path, const int64_t timestamp, const ConsistencyLevel::type consistency_level) {
+ // Your implementation goes here
+ printf("remove\n");
+ }
+
+ void remove_counter(const std::string& key, const ColumnPath& path, const ConsistencyLevel::type consistency_level) {
+ // Your implementation goes here
+ printf("remove_counter\n");
+ }
+
+ void batch_mutate(const std::map<std::string, std::map<std::string, std::vector<Mutation> > > & mutation_map, const ConsistencyLevel::type consistency_level) {
+ // Your implementation goes here
+ printf("batch_mutate\n");
+ }
+
+ void truncate(const std::string& cfname) {
+ // Your implementation goes here
+ printf("truncate\n");
+ }
+
+ void describe_schema_versions(std::map<std::string, std::vector<std::string> > & _return) {
+ // Your implementation goes here
+ printf("describe_schema_versions\n");
+ }
+
+ void describe_keyspaces(std::vector<KsDef> & _return) {
+ // Your implementation goes here
+ printf("describe_keyspaces\n");
+ }
+
+ void describe_cluster_name(std::string& _return) {
+ // Your implementation goes here
+ printf("describe_cluster_name\n");
+ }
+
+ void describe_version(std::string& _return) {
+ // Your implementation goes here
+ printf("describe_version\n");
+ }
+
+ void describe_ring(std::vector<TokenRange> & _return, const std::string& keyspace) {
+ // Your implementation goes here
+ printf("describe_ring\n");
+ }
+
+ void describe_token_map(std::map<std::string, std::string> & _return) {
+ // Your implementation goes here
+ printf("describe_token_map\n");
+ }
+
+ void describe_partitioner(std::string& _return) {
+ // Your implementation goes here
+ printf("describe_partitioner\n");
+ }
+
+ void describe_snitch(std::string& _return) {
+ // Your implementation goes here
+ printf("describe_snitch\n");
+ }
+
+ void describe_keyspace(KsDef& _return, const std::string& keyspace) {
+ // Your implementation goes here
+ printf("describe_keyspace\n");
+ }
+
+ void describe_splits(std::vector<std::string> & _return, const std::string& cfName, const std::string& start_token, const std::string& end_token, const int32_t keys_per_split) {
+ // Your implementation goes here
+ printf("describe_splits\n");
+ }
+
+ void system_add_column_family(std::string& _return, const CfDef& cf_def) {
+ // Your implementation goes here
+ printf("system_add_column_family\n");
+ }
+
+ void system_drop_column_family(std::string& _return, const std::string& column_family) {
+ // Your implementation goes here
+ printf("system_drop_column_family\n");
+ }
+
+ void system_add_keyspace(std::string& _return, const KsDef& ks_def) {
+ // Your implementation goes here
+ printf("system_add_keyspace\n");
+ }
+
+ void system_drop_keyspace(std::string& _return, const std::string& keyspace) {
+ // Your implementation goes here
+ printf("system_drop_keyspace\n");
+ }
+
+ void system_update_keyspace(std::string& _return, const KsDef& ks_def) {
+ // Your implementation goes here
+ printf("system_update_keyspace\n");
+ }
+
+ void system_update_column_family(std::string& _return, const CfDef& cf_def) {
+ // Your implementation goes here
+ printf("system_update_column_family\n");
+ }
+
+ void execute_cql_query(CqlResult& _return, const std::string& query, const Compression::type compression) {
+ // Your implementation goes here
+ printf("execute_cql_query\n");
+ }
+
+ void prepare_cql_query(CqlPreparedResult& _return, const std::string& query, const Compression::type compression) {
+ // Your implementation goes here
+ printf("prepare_cql_query\n");
+ }
+
+ void execute_prepared_cql_query(CqlResult& _return, const int32_t itemId, const std::vector<std::string> & values) {
+ // Your implementation goes here
+ printf("execute_prepared_cql_query\n");
+ }
+
+ void set_cql_version(const std::string& version) {
+ // Your implementation goes here
+ printf("set_cql_version\n");
+ }
+
+};
+
+int main(int argc, char **argv) {
+ int port = 9090;
+ shared_ptr<CassandraHandler> handler(new CassandraHandler());
+ shared_ptr<TProcessor> processor(new CassandraProcessor(handler));
+ shared_ptr<TServerTransport> serverTransport(new TServerSocket(port));
+ shared_ptr<TTransportFactory> transportFactory(new TBufferedTransportFactory());
+ shared_ptr<TProtocolFactory> protocolFactory(new TBinaryProtocolFactory());
+
+ TSimpleServer server(processor, serverTransport, transportFactory, protocolFactory);
+ server.serve();
+ return 0;
+}
+
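The skeleton's `main` serves on port 9090 over buffered transport; a real Cassandra node speaks the same interface on port 9160 with framed transport. The matching client side is not part of this patch; a minimal sketch under those assumptions (host `localhost` is illustrative, `CassandraClient` is the proxy generated into `Cassandra.h`):

#include "Cassandra.h"
#include <protocol/TBinaryProtocol.h>
#include <transport/TSocket.h>
#include <transport/TBufferTransports.h>
#include <cstdio>

using namespace ::apache::thrift;
using namespace ::apache::thrift::protocol;
using namespace ::apache::thrift::transport;
using namespace ::org::apache::cassandra;

int main() {
  boost::shared_ptr<TSocket> socket(new TSocket("localhost", 9160));
  boost::shared_ptr<TTransport> transport(new TFramedTransport(socket));
  boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
  CassandraClient client(protocol);

  transport->open();
  std::string cluster;
  client.describe_cluster_name(cluster);  // same out-parameter convention as the skeleton
  std::printf("cluster: %s\n", cluster.c_str());
  transport->close();
  return 0;
}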
diff --git a/storage/cassandra/gen-cpp/cassandra_constants.cpp b/storage/cassandra/gen-cpp/cassandra_constants.cpp
new file mode 100644
index 00000000000..49a01d2773e
--- /dev/null
+++ b/storage/cassandra/gen-cpp/cassandra_constants.cpp
@@ -0,0 +1,18 @@
+/**
+ * Autogenerated by Thrift Compiler (0.8.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+#include "cassandra_constants.h"
+
+namespace org { namespace apache { namespace cassandra {
+
+const cassandraConstants g_cassandra_constants;
+
+cassandraConstants::cassandraConstants() {
+ cassandra_const_VERSION = (char *)"19.32.0";
+}
+
+}}} // namespace
+
diff --git a/storage/cassandra/gen-cpp/cassandra_constants.h b/storage/cassandra/gen-cpp/cassandra_constants.h
new file mode 100644
index 00000000000..fa12a1676ae
--- /dev/null
+++ b/storage/cassandra/gen-cpp/cassandra_constants.h
@@ -0,0 +1,26 @@
+/**
+ * Autogenerated by Thrift Compiler (0.8.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+#ifndef cassandra_CONSTANTS_H
+#define cassandra_CONSTANTS_H
+
+#include "cassandra_types.h"
+
+namespace org { namespace apache { namespace cassandra {
+
+class cassandraConstants {
+ public:
+ cassandraConstants();
+
+// std::string VERSION;
+ char* cassandra_const_VERSION;
+};
+
+extern const cassandraConstants g_cassandra_constants;
+
+}}} // namespace
+
+#endif
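The two constants files exist to publish a single global, `g_cassandra_constants`, whose `cassandra_const_VERSION` member carries the Thrift interface version string "19.32.0". Reading it is a one-liner:

#include <cstdio>
#include "cassandra_constants.h"

int main() {
  // Prints "19.32.0", the interface version this code was generated against.
  std::printf("%s\n",
              org::apache::cassandra::g_cassandra_constants.cassandra_const_VERSION);
  return 0;
}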
diff --git a/storage/cassandra/gen-cpp/cassandra_types.cpp b/storage/cassandra/gen-cpp/cassandra_types.cpp
new file mode 100644
index 00000000000..4b51c21a320
--- /dev/null
+++ b/storage/cassandra/gen-cpp/cassandra_types.cpp
@@ -0,0 +1,3512 @@
+/**
+ * Autogenerated by Thrift Compiler (0.8.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+#include "cassandra_types.h"
+
+namespace org { namespace apache { namespace cassandra {
+
+int _kConsistencyLevelValues[] = {
+ ConsistencyLevel::ONE,
+ ConsistencyLevel::QUORUM,
+ ConsistencyLevel::LOCAL_QUORUM,
+ ConsistencyLevel::EACH_QUORUM,
+ ConsistencyLevel::ALL,
+ ConsistencyLevel::ANY,
+ ConsistencyLevel::TWO,
+ ConsistencyLevel::THREE
+};
+const char* _kConsistencyLevelNames[] = {
+ "ONE",
+ "QUORUM",
+ "LOCAL_QUORUM",
+ "EACH_QUORUM",
+ "ALL",
+ "ANY",
+ "TWO",
+ "THREE"
+};
+const std::map<int, const char*> _ConsistencyLevel_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(8, _kConsistencyLevelValues, _kConsistencyLevelNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kIndexOperatorValues[] = {
+ IndexOperator::EQ,
+ IndexOperator::GTE,
+ IndexOperator::GT,
+ IndexOperator::LTE,
+ IndexOperator::LT
+};
+const char* _kIndexOperatorNames[] = {
+ "EQ",
+ "GTE",
+ "GT",
+ "LTE",
+ "LT"
+};
+const std::map<int, const char*> _IndexOperator_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(5, _kIndexOperatorValues, _kIndexOperatorNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kIndexTypeValues[] = {
+ IndexType::KEYS,
+ IndexType::CUSTOM
+};
+const char* _kIndexTypeNames[] = {
+ "KEYS",
+ "CUSTOM"
+};
+const std::map<int, const char*> _IndexType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(2, _kIndexTypeValues, _kIndexTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kCompressionValues[] = {
+ Compression::GZIP,
+ Compression::NONE
+};
+const char* _kCompressionNames[] = {
+ "GZIP",
+ "NONE"
+};
+const std::map<int, const char*> _Compression_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(2, _kCompressionValues, _kCompressionNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kCqlResultTypeValues[] = {
+ CqlResultType::ROWS,
+ CqlResultType::VOID,
+ CqlResultType::INT
+};
+const char* _kCqlResultTypeNames[] = {
+ "ROWS",
+ "VOID",
+ "INT"
+};
+const std::map<int, const char*> _CqlResultType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kCqlResultTypeValues, _kCqlResultTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
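Each enum above also gets a `_VALUES_TO_NAMES` map, built from the parallel value/name arrays via `TEnumIterator`, which turns a raw wire value back into its printable identifier. A small sketch, assuming the header declares the map `extern` as Thrift does:

#include <cstdio>
#include <map>
#include "cassandra_types.h"

using namespace ::org::apache::cassandra;

int main() {
  // Recover the identifier for a consistency level received as a raw int.
  std::map<int, const char*>::const_iterator it =
      _ConsistencyLevel_VALUES_TO_NAMES.find(ConsistencyLevel::QUORUM);
  if (it != _ConsistencyLevel_VALUES_TO_NAMES.end())
    std::printf("%s\n", it->second);  // "QUORUM"
  return 0;
}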
+const char* Column::ascii_fingerprint = "3EE0E1C5C844001B62F08125068292CC";
+const uint8_t Column::binary_fingerprint[16] = {0x3E,0xE0,0xE1,0xC5,0xC8,0x44,0x00,0x1B,0x62,0xF0,0x81,0x25,0x06,0x82,0x92,0xCC};
+
+uint32_t Column::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_name = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->name);
+ isset_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->value);
+ this->__isset.value = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_I64) {
+ xfer += iprot->readI64(this->timestamp);
+ this->__isset.timestamp = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->ttl);
+ this->__isset.ttl = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_name)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t Column::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Column");
+ xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->name);
+ xfer += oprot->writeFieldEnd();
+ if (this->__isset.value) {
+ xfer += oprot->writeFieldBegin("value", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeBinary(this->value);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.timestamp) {
+ xfer += oprot->writeFieldBegin("timestamp", ::apache::thrift::protocol::T_I64, 3);
+ xfer += oprot->writeI64(this->timestamp);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.ttl) {
+ xfer += oprot->writeFieldBegin("ttl", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32(this->ttl);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
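Every struct that follows repeats the contract visible in `Column`: `write()` emits optional fields only when their `__isset` flag is set, and `read()` throws `TProtocolException::INVALID_DATA` if a required field (here `name`) never arrives. A minimal in-memory round trip, assuming Thrift 0.8's `TMemoryBuffer`:

#include "cassandra_types.h"
#include <protocol/TBinaryProtocol.h>
#include <transport/TBufferTransports.h>

using namespace ::apache::thrift;
using namespace ::org::apache::cassandra;

int main() {
  boost::shared_ptr<transport::TMemoryBuffer> buf(new transport::TMemoryBuffer());
  protocol::TBinaryProtocol proto(buf);

  Column in;
  in.name = "age";            // required: read() rejects a Column without it
  in.value = "34";
  in.__isset.value = true;    // optional fields are written only when flagged
  in.write(&proto);

  Column out;
  out.read(&proto);           // consumes the bytes just written from the same buffer
  return (out.name == in.name && out.value == in.value) ? 0 : 1;
}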
+const char* SuperColumn::ascii_fingerprint = "470EFC558004E98D92D604898305C04E";
+const uint8_t SuperColumn::binary_fingerprint[16] = {0x47,0x0E,0xFC,0x55,0x80,0x04,0xE9,0x8D,0x92,0xD6,0x04,0x89,0x83,0x05,0xC0,0x4E};
+
+uint32_t SuperColumn::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_name = false;
+ bool isset_columns = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->name);
+ isset_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->columns.clear();
+ uint32_t _size0;
+ ::apache::thrift::protocol::TType _etype3;
+ iprot->readListBegin(_etype3, _size0);
+ this->columns.resize(_size0);
+ uint32_t _i4;
+ for (_i4 = 0; _i4 < _size0; ++_i4)
+ {
+ xfer += this->columns[_i4].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ isset_columns = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_name)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_columns)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t SuperColumn::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("SuperColumn");
+ xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->name);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("columns", ::apache::thrift::protocol::T_LIST, 2);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->columns.size()));
+ std::vector<Column> ::const_iterator _iter5;
+ for (_iter5 = this->columns.begin(); _iter5 != this->columns.end(); ++_iter5)
+ {
+ xfer += (*_iter5).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* CounterColumn::ascii_fingerprint = "1CCCF6FC31CFD1D61BBBB1BAF3590620";
+const uint8_t CounterColumn::binary_fingerprint[16] = {0x1C,0xCC,0xF6,0xFC,0x31,0xCF,0xD1,0xD6,0x1B,0xBB,0xB1,0xBA,0xF3,0x59,0x06,0x20};
+
+uint32_t CounterColumn::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_name = false;
+ bool isset_value = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->name);
+ isset_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_I64) {
+ xfer += iprot->readI64(this->value);
+ isset_value = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_name)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_value)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t CounterColumn::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("CounterColumn");
+ xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->name);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("value", ::apache::thrift::protocol::T_I64, 2);
+ xfer += oprot->writeI64(this->value);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* CounterSuperColumn::ascii_fingerprint = "CD4C8C4BF7753E46DE417CDE369343A4";
+const uint8_t CounterSuperColumn::binary_fingerprint[16] = {0xCD,0x4C,0x8C,0x4B,0xF7,0x75,0x3E,0x46,0xDE,0x41,0x7C,0xDE,0x36,0x93,0x43,0xA4};
+
+uint32_t CounterSuperColumn::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_name = false;
+ bool isset_columns = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->name);
+ isset_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->columns.clear();
+ uint32_t _size6;
+ ::apache::thrift::protocol::TType _etype9;
+ iprot->readListBegin(_etype9, _size6);
+ this->columns.resize(_size6);
+ uint32_t _i10;
+ for (_i10 = 0; _i10 < _size6; ++_i10)
+ {
+ xfer += this->columns[_i10].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ isset_columns = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_name)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_columns)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t CounterSuperColumn::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("CounterSuperColumn");
+ xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->name);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("columns", ::apache::thrift::protocol::T_LIST, 2);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->columns.size()));
+ std::vector<CounterColumn> ::const_iterator _iter11;
+ for (_iter11 = this->columns.begin(); _iter11 != this->columns.end(); ++_iter11)
+ {
+ xfer += (*_iter11).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* ColumnOrSuperColumn::ascii_fingerprint = "2B34AC9E80F1DAA3A2A63B1AB1841E61";
+const uint8_t ColumnOrSuperColumn::binary_fingerprint[16] = {0x2B,0x34,0xAC,0x9E,0x80,0xF1,0xDA,0xA3,0xA2,0xA6,0x3B,0x1A,0xB1,0x84,0x1E,0x61};
+
+uint32_t ColumnOrSuperColumn::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->column.read(iprot);
+ this->__isset.column = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->super_column.read(iprot);
+ this->__isset.super_column = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->counter_column.read(iprot);
+ this->__isset.counter_column = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->counter_super_column.read(iprot);
+ this->__isset.counter_super_column = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t ColumnOrSuperColumn::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("ColumnOrSuperColumn");
+ if (this->__isset.column) {
+ xfer += oprot->writeFieldBegin("column", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->column.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.super_column) {
+ xfer += oprot->writeFieldBegin("super_column", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->super_column.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.counter_column) {
+ xfer += oprot->writeFieldBegin("counter_column", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->counter_column.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.counter_super_column) {
+ xfer += oprot->writeFieldBegin("counter_super_column", ::apache::thrift::protocol::T_STRUCT, 4);
+ xfer += this->counter_super_column.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* NotFoundException::ascii_fingerprint = "99914B932BD37A50B983C5E7C90AE93B";
+const uint8_t NotFoundException::binary_fingerprint[16] = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B};
+
+uint32_t NotFoundException::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t NotFoundException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("NotFoundException");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* InvalidRequestException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
+const uint8_t InvalidRequestException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
+
+uint32_t InvalidRequestException::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_why = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->why);
+ isset_why = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_why)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t InvalidRequestException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("InvalidRequestException");
+ xfer += oprot->writeFieldBegin("why", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->why);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* UnavailableException::ascii_fingerprint = "99914B932BD37A50B983C5E7C90AE93B";
+const uint8_t UnavailableException::binary_fingerprint[16] = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B};
+
+uint32_t UnavailableException::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t UnavailableException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("UnavailableException");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* TimedOutException::ascii_fingerprint = "99914B932BD37A50B983C5E7C90AE93B";
+const uint8_t TimedOutException::binary_fingerprint[16] = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B};
+
+uint32_t TimedOutException::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t TimedOutException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("TimedOutException");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* AuthenticationException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
+const uint8_t AuthenticationException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
+
+uint32_t AuthenticationException::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_why = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->why);
+ isset_why = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_why)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t AuthenticationException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("AuthenticationException");
+ xfer += oprot->writeFieldBegin("why", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->why);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* AuthorizationException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
+const uint8_t AuthorizationException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
+
+uint32_t AuthorizationException::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_why = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->why);
+ isset_why = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_why)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t AuthorizationException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("AuthorizationException");
+ xfer += oprot->writeFieldBegin("why", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->why);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* SchemaDisagreementException::ascii_fingerprint = "99914B932BD37A50B983C5E7C90AE93B";
+const uint8_t SchemaDisagreementException::binary_fingerprint[16] = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B};
+
+uint32_t SchemaDisagreementException::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t SchemaDisagreementException::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("SchemaDisagreementException");
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* ColumnParent::ascii_fingerprint = "0A13AE61181713A4100DFFB3EC293822";
+const uint8_t ColumnParent::binary_fingerprint[16] = {0x0A,0x13,0xAE,0x61,0x18,0x17,0x13,0xA4,0x10,0x0D,0xFF,0xB3,0xEC,0x29,0x38,0x22};
+
+uint32_t ColumnParent::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_column_family = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->column_family);
+ isset_column_family = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->super_column);
+ this->__isset.super_column = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_column_family)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t ColumnParent::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("ColumnParent");
+ xfer += oprot->writeFieldBegin("column_family", ::apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeString(this->column_family);
+ xfer += oprot->writeFieldEnd();
+ if (this->__isset.super_column) {
+ xfer += oprot->writeFieldBegin("super_column", ::apache::thrift::protocol::T_STRING, 4);
+ xfer += oprot->writeBinary(this->super_column);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* ColumnPath::ascii_fingerprint = "606212895BCF63C757913CF35AEB3462";
+const uint8_t ColumnPath::binary_fingerprint[16] = {0x60,0x62,0x12,0x89,0x5B,0xCF,0x63,0xC7,0x57,0x91,0x3C,0xF3,0x5A,0xEB,0x34,0x62};
+
+uint32_t ColumnPath::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_column_family = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->column_family);
+ isset_column_family = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->super_column);
+ this->__isset.super_column = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 5:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->column);
+ this->__isset.column = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_column_family)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t ColumnPath::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("ColumnPath");
+ xfer += oprot->writeFieldBegin("column_family", ::apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeString(this->column_family);
+ xfer += oprot->writeFieldEnd();
+ if (this->__isset.super_column) {
+ xfer += oprot->writeFieldBegin("super_column", ::apache::thrift::protocol::T_STRING, 4);
+ xfer += oprot->writeBinary(this->super_column);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.column) {
+ xfer += oprot->writeFieldBegin("column", ::apache::thrift::protocol::T_STRING, 5);
+ xfer += oprot->writeBinary(this->column);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* SliceRange::ascii_fingerprint = "184D24C9A0B8D4415E234DB649CAE740";
+const uint8_t SliceRange::binary_fingerprint[16] = {0x18,0x4D,0x24,0xC9,0xA0,0xB8,0xD4,0x41,0x5E,0x23,0x4D,0xB6,0x49,0xCA,0xE7,0x40};
+
+uint32_t SliceRange::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_start = false;
+ bool isset_finish = false;
+ bool isset_reversed = false;
+ bool isset_count = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->start);
+ isset_start = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->finish);
+ isset_finish = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_BOOL) {
+ xfer += iprot->readBool(this->reversed);
+ isset_reversed = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->count);
+ isset_count = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_start)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_finish)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_reversed)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_count)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t SliceRange::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("SliceRange");
+ xfer += oprot->writeFieldBegin("start", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->start);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("finish", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeBinary(this->finish);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("reversed", ::apache::thrift::protocol::T_BOOL, 3);
+ xfer += oprot->writeBool(this->reversed);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("count", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32(this->count);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* SlicePredicate::ascii_fingerprint = "F59D1D81C17DFFAF09988BF1C9CE5E27";
+const uint8_t SlicePredicate::binary_fingerprint[16] = {0xF5,0x9D,0x1D,0x81,0xC1,0x7D,0xFF,0xAF,0x09,0x98,0x8B,0xF1,0xC9,0xCE,0x5E,0x27};
+
+uint32_t SlicePredicate::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->column_names.clear();
+ uint32_t _size12;
+ ::apache::thrift::protocol::TType _etype15;
+ iprot->readListBegin(_etype15, _size12);
+ this->column_names.resize(_size12);
+ uint32_t _i16;
+ for (_i16 = 0; _i16 < _size12; ++_i16)
+ {
+ xfer += iprot->readBinary(this->column_names[_i16]);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.column_names = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->slice_range.read(iprot);
+ this->__isset.slice_range = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t SlicePredicate::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("SlicePredicate");
+ if (this->__isset.column_names) {
+ xfer += oprot->writeFieldBegin("column_names", ::apache::thrift::protocol::T_LIST, 1);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->column_names.size()));
+ std::vector<std::string> ::const_iterator _iter17;
+ for (_iter17 = this->column_names.begin(); _iter17 != this->column_names.end(); ++_iter17)
+ {
+ xfer += oprot->writeBinary((*_iter17));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.slice_range) {
+ xfer += oprot->writeFieldBegin("slice_range", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->slice_range.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* IndexExpression::ascii_fingerprint = "D9F4CFE2F293A8B1052FD3031DD2C847";
+const uint8_t IndexExpression::binary_fingerprint[16] = {0xD9,0xF4,0xCF,0xE2,0xF2,0x93,0xA8,0xB1,0x05,0x2F,0xD3,0x03,0x1D,0xD2,0xC8,0x47};
+
+uint32_t IndexExpression::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_column_name = false;
+ bool isset_op = false;
+ bool isset_value = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->column_name);
+ isset_column_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast18;
+ xfer += iprot->readI32(ecast18);
+ this->op = (IndexOperator::type)ecast18;
+ isset_op = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->value);
+ isset_value = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_column_name)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_op)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_value)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t IndexExpression::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("IndexExpression");
+ xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->column_name);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("op", ::apache::thrift::protocol::T_I32, 2);
+ xfer += oprot->writeI32((int32_t)this->op);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("value", ::apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeBinary(this->value);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* IndexClause::ascii_fingerprint = "9B551B9AB86120B0EEA9005C77FD3C1F";
+const uint8_t IndexClause::binary_fingerprint[16] = {0x9B,0x55,0x1B,0x9A,0xB8,0x61,0x20,0xB0,0xEE,0xA9,0x00,0x5C,0x77,0xFD,0x3C,0x1F};
+
+uint32_t IndexClause::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_expressions = false;
+ bool isset_start_key = false;
+ bool isset_count = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->expressions.clear();
+ uint32_t _size19;
+ ::apache::thrift::protocol::TType _etype22;
+ iprot->readListBegin(_etype22, _size19);
+ this->expressions.resize(_size19);
+ uint32_t _i23;
+ for (_i23 = 0; _i23 < _size19; ++_i23)
+ {
+ xfer += this->expressions[_i23].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ isset_expressions = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->start_key);
+ isset_start_key = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->count);
+ isset_count = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_expressions)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_start_key)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_count)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t IndexClause::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("IndexClause");
+ xfer += oprot->writeFieldBegin("expressions", ::apache::thrift::protocol::T_LIST, 1);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->expressions.size()));
+ std::vector<IndexExpression> ::const_iterator _iter24;
+ for (_iter24 = this->expressions.begin(); _iter24 != this->expressions.end(); ++_iter24)
+ {
+ xfer += (*_iter24).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("start_key", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeBinary(this->start_key);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("count", ::apache::thrift::protocol::T_I32, 3);
+ xfer += oprot->writeI32(this->count);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* KeyRange::ascii_fingerprint = "A6EC82FA0980B91C7C8EB013C61CA1B0";
+const uint8_t KeyRange::binary_fingerprint[16] = {0xA6,0xEC,0x82,0xFA,0x09,0x80,0xB9,0x1C,0x7C,0x8E,0xB0,0x13,0xC6,0x1C,0xA1,0xB0};
+
+uint32_t KeyRange::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_count = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->start_key);
+ this->__isset.start_key = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->end_key);
+ this->__isset.end_key = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->start_token);
+ this->__isset.start_token = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->end_token);
+ this->__isset.end_token = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 6:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->row_filter.clear();
+ uint32_t _size25;
+ ::apache::thrift::protocol::TType _etype28;
+ iprot->readListBegin(_etype28, _size25);
+ this->row_filter.resize(_size25);
+ uint32_t _i29;
+ for (_i29 = 0; _i29 < _size25; ++_i29)
+ {
+ xfer += this->row_filter[_i29].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.row_filter = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 5:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->count);
+ isset_count = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_count)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t KeyRange::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("KeyRange");
+ if (this->__isset.start_key) {
+ xfer += oprot->writeFieldBegin("start_key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->start_key);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.end_key) {
+ xfer += oprot->writeFieldBegin("end_key", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeBinary(this->end_key);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.start_token) {
+ xfer += oprot->writeFieldBegin("start_token", ::apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeString(this->start_token);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.end_token) {
+ xfer += oprot->writeFieldBegin("end_token", ::apache::thrift::protocol::T_STRING, 4);
+ xfer += oprot->writeString(this->end_token);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldBegin("count", ::apache::thrift::protocol::T_I32, 5);
+ xfer += oprot->writeI32(this->count);
+ xfer += oprot->writeFieldEnd();
+ if (this->__isset.row_filter) {
+ xfer += oprot->writeFieldBegin("row_filter", ::apache::thrift::protocol::T_LIST, 6);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->row_filter.size()));
+ std::vector<IndexExpression> ::const_iterator _iter30;
+ for (_iter30 = this->row_filter.begin(); _iter30 != this->row_filter.end(); ++_iter30)
+ {
+ xfer += (*_iter30).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* KeySlice::ascii_fingerprint = "D1568675B0C135C909E3169B72A4DA3D";
+const uint8_t KeySlice::binary_fingerprint[16] = {0xD1,0x56,0x86,0x75,0xB0,0xC1,0x35,0xC9,0x09,0xE3,0x16,0x9B,0x72,0xA4,0xDA,0x3D};
+
+uint32_t KeySlice::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_key = false;
+ bool isset_columns = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->key);
+ isset_key = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->columns.clear();
+ uint32_t _size31;
+ ::apache::thrift::protocol::TType _etype34;
+ iprot->readListBegin(_etype34, _size31);
+ this->columns.resize(_size31);
+ uint32_t _i35;
+ for (_i35 = 0; _i35 < _size31; ++_i35)
+ {
+ xfer += this->columns[_i35].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ isset_columns = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_key)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_columns)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t KeySlice::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("KeySlice");
+ xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->key);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("columns", ::apache::thrift::protocol::T_LIST, 2);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->columns.size()));
+ std::vector<ColumnOrSuperColumn> ::const_iterator _iter36;
+ for (_iter36 = this->columns.begin(); _iter36 != this->columns.end(); ++_iter36)
+ {
+ xfer += (*_iter36).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* KeyCount::ascii_fingerprint = "EEBC915CE44901401D881E6091423036";
+const uint8_t KeyCount::binary_fingerprint[16] = {0xEE,0xBC,0x91,0x5C,0xE4,0x49,0x01,0x40,0x1D,0x88,0x1E,0x60,0x91,0x42,0x30,0x36};
+
+uint32_t KeyCount::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_key = false;
+ bool isset_count = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->key);
+ isset_key = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->count);
+ isset_count = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_key)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_count)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t KeyCount::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("KeyCount");
+ xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->key);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("count", ::apache::thrift::protocol::T_I32, 2);
+ xfer += oprot->writeI32(this->count);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* Deletion::ascii_fingerprint = "40F33ECF1C932CA77C2414C4E6C60CBE";
+const uint8_t Deletion::binary_fingerprint[16] = {0x40,0xF3,0x3E,0xCF,0x1C,0x93,0x2C,0xA7,0x7C,0x24,0x14,0xC4,0xE6,0xC6,0x0C,0xBE};
+
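+// Deletion has no required fields, so read() keeps no isset_* locals and
+// never throws INVALID_DATA. The absent combinations are meaningful in the
+// Cassandra IDL: a Deletion with neither super_column nor predicate removes
+// the whole row, while a predicate restricts it to the matching columns.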
+uint32_t Deletion::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_I64) {
+ xfer += iprot->readI64(this->timestamp);
+ this->__isset.timestamp = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->super_column);
+ this->__isset.super_column = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->predicate.read(iprot);
+ this->__isset.predicate = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Deletion::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Deletion");
+ if (this->__isset.timestamp) {
+ xfer += oprot->writeFieldBegin("timestamp", ::apache::thrift::protocol::T_I64, 1);
+ xfer += oprot->writeI64(this->timestamp);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.super_column) {
+ xfer += oprot->writeFieldBegin("super_column", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeBinary(this->super_column);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.predicate) {
+ xfer += oprot->writeFieldBegin("predicate", ::apache::thrift::protocol::T_STRUCT, 3);
+ xfer += this->predicate.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* Mutation::ascii_fingerprint = "E8B65DF3979C6868F80DF81F8E769E63";
+const uint8_t Mutation::binary_fingerprint[16] = {0xE8,0xB6,0x5D,0xF3,0x97,0x9C,0x68,0x68,0xF8,0x0D,0xF8,0x1F,0x8E,0x76,0x9E,0x63};
+
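+// Mutation is effectively a tagged union encoded as a plain struct: the IDL
+// intends exactly one of column_or_supercolumn (an insert/update) or
+// deletion to be set per Mutation, but the generated code does not enforce
+// that invariant -- both sides must check the __isset flags themselves.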
+uint32_t Mutation::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->column_or_supercolumn.read(iprot);
+ this->__isset.column_or_supercolumn = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->deletion.read(iprot);
+ this->__isset.deletion = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t Mutation::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("Mutation");
+ if (this->__isset.column_or_supercolumn) {
+ xfer += oprot->writeFieldBegin("column_or_supercolumn", ::apache::thrift::protocol::T_STRUCT, 1);
+ xfer += this->column_or_supercolumn.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.deletion) {
+ xfer += oprot->writeFieldBegin("deletion", ::apache::thrift::protocol::T_STRUCT, 2);
+ xfer += this->deletion.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* EndpointDetails::ascii_fingerprint = "F4A50F0EC638C7F66026F9B6678FD89B";
+const uint8_t EndpointDetails::binary_fingerprint[16] = {0xF4,0xA5,0x0F,0x0E,0xC6,0x38,0xC7,0xF6,0x60,0x26,0xF9,0xB6,0x67,0x8F,0xD8,0x9B};
+
+uint32_t EndpointDetails::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->host);
+ this->__isset.host = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->datacenter);
+ this->__isset.datacenter = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->rack);
+ this->__isset.rack = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ return xfer;
+}
+
+uint32_t EndpointDetails::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("EndpointDetails");
+ xfer += oprot->writeFieldBegin("host", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->host);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("datacenter", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->datacenter);
+ xfer += oprot->writeFieldEnd();
+ if (this->__isset.rack) {
+ xfer += oprot->writeFieldBegin("rack", ::apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeString(this->rack);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* TokenRange::ascii_fingerprint = "832268DC4CD6B17EE8881FC57EA04679";
+const uint8_t TokenRange::binary_fingerprint[16] = {0x83,0x22,0x68,0xDC,0x4C,0xD6,0xB1,0x7E,0xE8,0x88,0x1F,0xC5,0x7E,0xA0,0x46,0x79};
+
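+// The list-valued fields below show the generated container pattern: read
+// the element type and size from the wire, resize() the vector up front,
+// then deserialize each element in place. Note that the reported element
+// type (_etypeNN) is captured but never compared against the expected one;
+// the per-element read simply assumes the peer sent the declared type.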
+uint32_t TokenRange::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_start_token = false;
+ bool isset_end_token = false;
+ bool isset_endpoints = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->start_token);
+ isset_start_token = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->end_token);
+ isset_end_token = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->endpoints.clear();
+ uint32_t _size37;
+ ::apache::thrift::protocol::TType _etype40;
+ iprot->readListBegin(_etype40, _size37);
+ this->endpoints.resize(_size37);
+ uint32_t _i41;
+ for (_i41 = 0; _i41 < _size37; ++_i41)
+ {
+ xfer += iprot->readString(this->endpoints[_i41]);
+ }
+ iprot->readListEnd();
+ }
+ isset_endpoints = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->rpc_endpoints.clear();
+ uint32_t _size42;
+ ::apache::thrift::protocol::TType _etype45;
+ iprot->readListBegin(_etype45, _size42);
+ this->rpc_endpoints.resize(_size42);
+ uint32_t _i46;
+ for (_i46 = 0; _i46 < _size42; ++_i46)
+ {
+ xfer += iprot->readString(this->rpc_endpoints[_i46]);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.rpc_endpoints = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 5:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->endpoint_details.clear();
+ uint32_t _size47;
+ ::apache::thrift::protocol::TType _etype50;
+ iprot->readListBegin(_etype50, _size47);
+ this->endpoint_details.resize(_size47);
+ uint32_t _i51;
+ for (_i51 = 0; _i51 < _size47; ++_i51)
+ {
+ xfer += this->endpoint_details[_i51].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.endpoint_details = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_start_token)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_end_token)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_endpoints)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t TokenRange::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("TokenRange");
+ xfer += oprot->writeFieldBegin("start_token", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->start_token);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("end_token", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->end_token);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("endpoints", ::apache::thrift::protocol::T_LIST, 3);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->endpoints.size()));
+ std::vector<std::string> ::const_iterator _iter52;
+ for (_iter52 = this->endpoints.begin(); _iter52 != this->endpoints.end(); ++_iter52)
+ {
+ xfer += oprot->writeString((*_iter52));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ if (this->__isset.rpc_endpoints) {
+ xfer += oprot->writeFieldBegin("rpc_endpoints", ::apache::thrift::protocol::T_LIST, 4);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->rpc_endpoints.size()));
+ std::vector<std::string> ::const_iterator _iter53;
+ for (_iter53 = this->rpc_endpoints.begin(); _iter53 != this->rpc_endpoints.end(); ++_iter53)
+ {
+ xfer += oprot->writeString((*_iter53));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.endpoint_details) {
+ xfer += oprot->writeFieldBegin("endpoint_details", ::apache::thrift::protocol::T_LIST, 5);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->endpoint_details.size()));
+ std::vector<EndpointDetails> ::const_iterator _iter54;
+ for (_iter54 = this->endpoint_details.begin(); _iter54 != this->endpoint_details.end(); ++_iter54)
+ {
+ xfer += (*_iter54).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* AuthenticationRequest::ascii_fingerprint = "5EA2D527ECA3BA20C77AFC023EE8C05F";
+const uint8_t AuthenticationRequest::binary_fingerprint[16] = {0x5E,0xA2,0xD5,0x27,0xEC,0xA3,0xBA,0x20,0xC7,0x7A,0xFC,0x02,0x3E,0xE8,0xC0,0x5F};
+
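+// Maps are decoded much like lists: key/value wire types and an entry count
+// first, then each pair is inserted through operator[], so a duplicate key
+// on the wire silently overwrites the earlier value. An illustrative login
+// request (the "username"/"password" keys are the conventional ones used by
+// Cassandra's SimpleAuthenticator, not something this file mandates):
+//
+//   AuthenticationRequest auth;
+//   auth.credentials["username"] = "cassandra";
+//   auth.credentials["password"] = "cassandra";
+//   auth.write(oprot);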
+uint32_t AuthenticationRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_credentials = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_MAP) {
+ {
+ this->credentials.clear();
+ uint32_t _size55;
+ ::apache::thrift::protocol::TType _ktype56;
+ ::apache::thrift::protocol::TType _vtype57;
+ iprot->readMapBegin(_ktype56, _vtype57, _size55);
+ uint32_t _i59;
+ for (_i59 = 0; _i59 < _size55; ++_i59)
+ {
+ std::string _key60;
+ xfer += iprot->readString(_key60);
+ std::string& _val61 = this->credentials[_key60];
+ xfer += iprot->readString(_val61);
+ }
+ iprot->readMapEnd();
+ }
+ isset_credentials = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_credentials)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t AuthenticationRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("AuthenticationRequest");
+ xfer += oprot->writeFieldBegin("credentials", ::apache::thrift::protocol::T_MAP, 1);
+ {
+ xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->credentials.size()));
+ std::map<std::string, std::string> ::const_iterator _iter62;
+ for (_iter62 = this->credentials.begin(); _iter62 != this->credentials.end(); ++_iter62)
+ {
+ xfer += oprot->writeString(_iter62->first);
+ xfer += oprot->writeString(_iter62->second);
+ }
+ xfer += oprot->writeMapEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* ColumnDef::ascii_fingerprint = "0D89CE83D7EDAD079AC3213ED1DCAA58";
+const uint8_t ColumnDef::binary_fingerprint[16] = {0x0D,0x89,0xCE,0x83,0xD7,0xED,0xAD,0x07,0x9A,0xC3,0x21,0x3E,0xD1,0xDC,0xAA,0x58};
+
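+// Enum-typed fields travel as plain T_I32 values: readI32() into a local
+// (ecast63 below) followed by a C-style cast to IndexType::type, with no
+// range check -- an unknown value from a newer peer is preserved verbatim
+// rather than rejected. write() casts back to int32_t the same way.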
+uint32_t ColumnDef::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_name = false;
+ bool isset_validation_class = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->name);
+ isset_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->validation_class);
+ isset_validation_class = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast63;
+ xfer += iprot->readI32(ecast63);
+ this->index_type = (IndexType::type)ecast63;
+ this->__isset.index_type = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->index_name);
+ this->__isset.index_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 5:
+ if (ftype == ::apache::thrift::protocol::T_MAP) {
+ {
+ this->index_options.clear();
+ uint32_t _size64;
+ ::apache::thrift::protocol::TType _ktype65;
+ ::apache::thrift::protocol::TType _vtype66;
+ iprot->readMapBegin(_ktype65, _vtype66, _size64);
+ uint32_t _i68;
+ for (_i68 = 0; _i68 < _size64; ++_i68)
+ {
+ std::string _key69;
+ xfer += iprot->readString(_key69);
+ std::string& _val70 = this->index_options[_key69];
+ xfer += iprot->readString(_val70);
+ }
+ iprot->readMapEnd();
+ }
+ this->__isset.index_options = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_name)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_validation_class)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t ColumnDef::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("ColumnDef");
+ xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->name);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("validation_class", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->validation_class);
+ xfer += oprot->writeFieldEnd();
+ if (this->__isset.index_type) {
+ xfer += oprot->writeFieldBegin("index_type", ::apache::thrift::protocol::T_I32, 3);
+ xfer += oprot->writeI32((int32_t)this->index_type);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.index_name) {
+ xfer += oprot->writeFieldBegin("index_name", ::apache::thrift::protocol::T_STRING, 4);
+ xfer += oprot->writeString(this->index_name);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.index_options) {
+ xfer += oprot->writeFieldBegin("index_options", ::apache::thrift::protocol::T_MAP, 5);
+ {
+ xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->index_options.size()));
+ std::map<std::string, std::string> ::const_iterator _iter71;
+ for (_iter71 = this->index_options.begin(); _iter71 != this->index_options.end(); ++_iter71)
+ {
+ xfer += oprot->writeString(_iter71->first);
+ xfer += oprot->writeString(_iter71->second);
+ }
+ xfer += oprot->writeMapEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* CfDef::ascii_fingerprint = "231A260521B5DD99EFBCCBDD8768CA7D";
+const uint8_t CfDef::binary_fingerprint[16] = {0x23,0x1A,0x26,0x05,0x21,0xB5,0xDD,0x99,0xEF,0xBC,0xCB,0xDD,0x87,0x68,0xCA,0x7D};
+
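+// CfDef is the largest struct in this module and its field ids are sparse:
+// ids of retired attributes are never reused, and the trailing switch cases
+// (9, 11, 19-23, 25, 27, 31) sit out of numeric order, presumably because
+// the source IDL lists the deprecated cache/memtable settings after the
+// current ones. Only keyspace (1) and name (2) are required.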
+uint32_t CfDef::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_keyspace = false;
+ bool isset_name = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->keyspace);
+ isset_keyspace = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->name);
+ isset_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->column_type);
+ this->__isset.column_type = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 5:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->comparator_type);
+ this->__isset.comparator_type = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 6:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->subcomparator_type);
+ this->__isset.subcomparator_type = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 8:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->comment);
+ this->__isset.comment = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 12:
+ if (ftype == ::apache::thrift::protocol::T_DOUBLE) {
+ xfer += iprot->readDouble(this->read_repair_chance);
+ this->__isset.read_repair_chance = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 13:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->column_metadata.clear();
+ uint32_t _size72;
+ ::apache::thrift::protocol::TType _etype75;
+ iprot->readListBegin(_etype75, _size72);
+ this->column_metadata.resize(_size72);
+ uint32_t _i76;
+ for (_i76 = 0; _i76 < _size72; ++_i76)
+ {
+ xfer += this->column_metadata[_i76].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.column_metadata = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 14:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->gc_grace_seconds);
+ this->__isset.gc_grace_seconds = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 15:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->default_validation_class);
+ this->__isset.default_validation_class = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 16:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->id);
+ this->__isset.id = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 17:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->min_compaction_threshold);
+ this->__isset.min_compaction_threshold = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 18:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->max_compaction_threshold);
+ this->__isset.max_compaction_threshold = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 24:
+ if (ftype == ::apache::thrift::protocol::T_BOOL) {
+ xfer += iprot->readBool(this->replicate_on_write);
+ this->__isset.replicate_on_write = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 26:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->key_validation_class);
+ this->__isset.key_validation_class = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 28:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->key_alias);
+ this->__isset.key_alias = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 29:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->compaction_strategy);
+ this->__isset.compaction_strategy = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 30:
+ if (ftype == ::apache::thrift::protocol::T_MAP) {
+ {
+ this->compaction_strategy_options.clear();
+ uint32_t _size77;
+ ::apache::thrift::protocol::TType _ktype78;
+ ::apache::thrift::protocol::TType _vtype79;
+ iprot->readMapBegin(_ktype78, _vtype79, _size77);
+ uint32_t _i81;
+ for (_i81 = 0; _i81 < _size77; ++_i81)
+ {
+ std::string _key82;
+ xfer += iprot->readString(_key82);
+ std::string& _val83 = this->compaction_strategy_options[_key82];
+ xfer += iprot->readString(_val83);
+ }
+ iprot->readMapEnd();
+ }
+ this->__isset.compaction_strategy_options = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 32:
+ if (ftype == ::apache::thrift::protocol::T_MAP) {
+ {
+ this->compression_options.clear();
+ uint32_t _size84;
+ ::apache::thrift::protocol::TType _ktype85;
+ ::apache::thrift::protocol::TType _vtype86;
+ iprot->readMapBegin(_ktype85, _vtype86, _size84);
+ uint32_t _i88;
+ for (_i88 = 0; _i88 < _size84; ++_i88)
+ {
+ std::string _key89;
+ xfer += iprot->readString(_key89);
+ std::string& _val90 = this->compression_options[_key89];
+ xfer += iprot->readString(_val90);
+ }
+ iprot->readMapEnd();
+ }
+ this->__isset.compression_options = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 33:
+ if (ftype == ::apache::thrift::protocol::T_DOUBLE) {
+ xfer += iprot->readDouble(this->bloom_filter_fp_chance);
+ this->__isset.bloom_filter_fp_chance = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 34:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->caching);
+ this->__isset.caching = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 37:
+ if (ftype == ::apache::thrift::protocol::T_DOUBLE) {
+ xfer += iprot->readDouble(this->dclocal_read_repair_chance);
+ this->__isset.dclocal_read_repair_chance = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 9:
+ if (ftype == ::apache::thrift::protocol::T_DOUBLE) {
+ xfer += iprot->readDouble(this->row_cache_size);
+ this->__isset.row_cache_size = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 11:
+ if (ftype == ::apache::thrift::protocol::T_DOUBLE) {
+ xfer += iprot->readDouble(this->key_cache_size);
+ this->__isset.key_cache_size = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 19:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->row_cache_save_period_in_seconds);
+ this->__isset.row_cache_save_period_in_seconds = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 20:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->key_cache_save_period_in_seconds);
+ this->__isset.key_cache_save_period_in_seconds = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 21:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->memtable_flush_after_mins);
+ this->__isset.memtable_flush_after_mins = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 22:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->memtable_throughput_in_mb);
+ this->__isset.memtable_throughput_in_mb = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 23:
+ if (ftype == ::apache::thrift::protocol::T_DOUBLE) {
+ xfer += iprot->readDouble(this->memtable_operations_in_millions);
+ this->__isset.memtable_operations_in_millions = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 25:
+ if (ftype == ::apache::thrift::protocol::T_DOUBLE) {
+ xfer += iprot->readDouble(this->merge_shards_chance);
+ this->__isset.merge_shards_chance = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 27:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->row_cache_provider);
+ this->__isset.row_cache_provider = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 31:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->row_cache_keys_to_save);
+ this->__isset.row_cache_keys_to_save = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_keyspace)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_name)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t CfDef::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("CfDef");
+ xfer += oprot->writeFieldBegin("keyspace", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->keyspace);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->name);
+ xfer += oprot->writeFieldEnd();
+ if (this->__isset.column_type) {
+ xfer += oprot->writeFieldBegin("column_type", ::apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeString(this->column_type);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.comparator_type) {
+ xfer += oprot->writeFieldBegin("comparator_type", ::apache::thrift::protocol::T_STRING, 5);
+ xfer += oprot->writeString(this->comparator_type);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.subcomparator_type) {
+ xfer += oprot->writeFieldBegin("subcomparator_type", ::apache::thrift::protocol::T_STRING, 6);
+ xfer += oprot->writeString(this->subcomparator_type);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.comment) {
+ xfer += oprot->writeFieldBegin("comment", ::apache::thrift::protocol::T_STRING, 8);
+ xfer += oprot->writeString(this->comment);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.row_cache_size) {
+ xfer += oprot->writeFieldBegin("row_cache_size", ::apache::thrift::protocol::T_DOUBLE, 9);
+ xfer += oprot->writeDouble(this->row_cache_size);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.key_cache_size) {
+ xfer += oprot->writeFieldBegin("key_cache_size", ::apache::thrift::protocol::T_DOUBLE, 11);
+ xfer += oprot->writeDouble(this->key_cache_size);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.read_repair_chance) {
+ xfer += oprot->writeFieldBegin("read_repair_chance", ::apache::thrift::protocol::T_DOUBLE, 12);
+ xfer += oprot->writeDouble(this->read_repair_chance);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.column_metadata) {
+ xfer += oprot->writeFieldBegin("column_metadata", ::apache::thrift::protocol::T_LIST, 13);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->column_metadata.size()));
+ std::vector<ColumnDef> ::const_iterator _iter91;
+ for (_iter91 = this->column_metadata.begin(); _iter91 != this->column_metadata.end(); ++_iter91)
+ {
+ xfer += (*_iter91).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.gc_grace_seconds) {
+ xfer += oprot->writeFieldBegin("gc_grace_seconds", ::apache::thrift::protocol::T_I32, 14);
+ xfer += oprot->writeI32(this->gc_grace_seconds);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.default_validation_class) {
+ xfer += oprot->writeFieldBegin("default_validation_class", ::apache::thrift::protocol::T_STRING, 15);
+ xfer += oprot->writeString(this->default_validation_class);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.id) {
+ xfer += oprot->writeFieldBegin("id", ::apache::thrift::protocol::T_I32, 16);
+ xfer += oprot->writeI32(this->id);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.min_compaction_threshold) {
+ xfer += oprot->writeFieldBegin("min_compaction_threshold", ::apache::thrift::protocol::T_I32, 17);
+ xfer += oprot->writeI32(this->min_compaction_threshold);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.max_compaction_threshold) {
+ xfer += oprot->writeFieldBegin("max_compaction_threshold", ::apache::thrift::protocol::T_I32, 18);
+ xfer += oprot->writeI32(this->max_compaction_threshold);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.row_cache_save_period_in_seconds) {
+ xfer += oprot->writeFieldBegin("row_cache_save_period_in_seconds", ::apache::thrift::protocol::T_I32, 19);
+ xfer += oprot->writeI32(this->row_cache_save_period_in_seconds);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.key_cache_save_period_in_seconds) {
+ xfer += oprot->writeFieldBegin("key_cache_save_period_in_seconds", ::apache::thrift::protocol::T_I32, 20);
+ xfer += oprot->writeI32(this->key_cache_save_period_in_seconds);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.memtable_flush_after_mins) {
+ xfer += oprot->writeFieldBegin("memtable_flush_after_mins", ::apache::thrift::protocol::T_I32, 21);
+ xfer += oprot->writeI32(this->memtable_flush_after_mins);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.memtable_throughput_in_mb) {
+ xfer += oprot->writeFieldBegin("memtable_throughput_in_mb", ::apache::thrift::protocol::T_I32, 22);
+ xfer += oprot->writeI32(this->memtable_throughput_in_mb);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.memtable_operations_in_millions) {
+ xfer += oprot->writeFieldBegin("memtable_operations_in_millions", ::apache::thrift::protocol::T_DOUBLE, 23);
+ xfer += oprot->writeDouble(this->memtable_operations_in_millions);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.replicate_on_write) {
+ xfer += oprot->writeFieldBegin("replicate_on_write", ::apache::thrift::protocol::T_BOOL, 24);
+ xfer += oprot->writeBool(this->replicate_on_write);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.merge_shards_chance) {
+ xfer += oprot->writeFieldBegin("merge_shards_chance", ::apache::thrift::protocol::T_DOUBLE, 25);
+ xfer += oprot->writeDouble(this->merge_shards_chance);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.key_validation_class) {
+ xfer += oprot->writeFieldBegin("key_validation_class", ::apache::thrift::protocol::T_STRING, 26);
+ xfer += oprot->writeString(this->key_validation_class);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.row_cache_provider) {
+ xfer += oprot->writeFieldBegin("row_cache_provider", ::apache::thrift::protocol::T_STRING, 27);
+ xfer += oprot->writeString(this->row_cache_provider);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.key_alias) {
+ xfer += oprot->writeFieldBegin("key_alias", ::apache::thrift::protocol::T_STRING, 28);
+ xfer += oprot->writeBinary(this->key_alias);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.compaction_strategy) {
+ xfer += oprot->writeFieldBegin("compaction_strategy", ::apache::thrift::protocol::T_STRING, 29);
+ xfer += oprot->writeString(this->compaction_strategy);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.compaction_strategy_options) {
+ xfer += oprot->writeFieldBegin("compaction_strategy_options", ::apache::thrift::protocol::T_MAP, 30);
+ {
+ xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->compaction_strategy_options.size()));
+ std::map<std::string, std::string> ::const_iterator _iter92;
+ for (_iter92 = this->compaction_strategy_options.begin(); _iter92 != this->compaction_strategy_options.end(); ++_iter92)
+ {
+ xfer += oprot->writeString(_iter92->first);
+ xfer += oprot->writeString(_iter92->second);
+ }
+ xfer += oprot->writeMapEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.row_cache_keys_to_save) {
+ xfer += oprot->writeFieldBegin("row_cache_keys_to_save", ::apache::thrift::protocol::T_I32, 31);
+ xfer += oprot->writeI32(this->row_cache_keys_to_save);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.compression_options) {
+ xfer += oprot->writeFieldBegin("compression_options", ::apache::thrift::protocol::T_MAP, 32);
+ {
+ xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->compression_options.size()));
+ std::map<std::string, std::string> ::const_iterator _iter93;
+ for (_iter93 = this->compression_options.begin(); _iter93 != this->compression_options.end(); ++_iter93)
+ {
+ xfer += oprot->writeString(_iter93->first);
+ xfer += oprot->writeString(_iter93->second);
+ }
+ xfer += oprot->writeMapEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.bloom_filter_fp_chance) {
+ xfer += oprot->writeFieldBegin("bloom_filter_fp_chance", ::apache::thrift::protocol::T_DOUBLE, 33);
+ xfer += oprot->writeDouble(this->bloom_filter_fp_chance);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.caching) {
+ xfer += oprot->writeFieldBegin("caching", ::apache::thrift::protocol::T_STRING, 34);
+ xfer += oprot->writeString(this->caching);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.dclocal_read_repair_chance) {
+ xfer += oprot->writeFieldBegin("dclocal_read_repair_chance", ::apache::thrift::protocol::T_DOUBLE, 37);
+ xfer += oprot->writeDouble(this->dclocal_read_repair_chance);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* KsDef::ascii_fingerprint = "0767851B6476EB3777A21E59E912E11A";
+const uint8_t KsDef::binary_fingerprint[16] = {0x07,0x67,0x85,0x1B,0x64,0x76,0xEB,0x37,0x77,0xA2,0x1E,0x59,0xE9,0x12,0xE1,0x1A};
+
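+// KsDef requires name, strategy_class and cf_defs. replication_factor
+// (id 4) is optional and, in the Cassandra generation this IDL comes from,
+// deprecated in favour of a "replication_factor" entry inside
+// strategy_options; it is retained here for wire compatibility.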
+uint32_t KsDef::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_name = false;
+ bool isset_strategy_class = false;
+ bool isset_cf_defs = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->name);
+ isset_name = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->strategy_class);
+ isset_strategy_class = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_MAP) {
+ {
+ this->strategy_options.clear();
+ uint32_t _size94;
+ ::apache::thrift::protocol::TType _ktype95;
+ ::apache::thrift::protocol::TType _vtype96;
+ iprot->readMapBegin(_ktype95, _vtype96, _size94);
+ uint32_t _i98;
+ for (_i98 = 0; _i98 < _size94; ++_i98)
+ {
+ std::string _key99;
+ xfer += iprot->readString(_key99);
+ std::string& _val100 = this->strategy_options[_key99];
+ xfer += iprot->readString(_val100);
+ }
+ iprot->readMapEnd();
+ }
+ this->__isset.strategy_options = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->replication_factor);
+ this->__isset.replication_factor = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 5:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->cf_defs.clear();
+ uint32_t _size101;
+ ::apache::thrift::protocol::TType _etype104;
+ iprot->readListBegin(_etype104, _size101);
+ this->cf_defs.resize(_size101);
+ uint32_t _i105;
+ for (_i105 = 0; _i105 < _size101; ++_i105)
+ {
+ xfer += this->cf_defs[_i105].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ isset_cf_defs = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 6:
+ if (ftype == ::apache::thrift::protocol::T_BOOL) {
+ xfer += iprot->readBool(this->durable_writes);
+ this->__isset.durable_writes = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_name)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_strategy_class)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_cf_defs)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t KsDef::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("KsDef");
+ xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeString(this->name);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("strategy_class", ::apache::thrift::protocol::T_STRING, 2);
+ xfer += oprot->writeString(this->strategy_class);
+ xfer += oprot->writeFieldEnd();
+ if (this->__isset.strategy_options) {
+ xfer += oprot->writeFieldBegin("strategy_options", ::apache::thrift::protocol::T_MAP, 3);
+ {
+ xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->strategy_options.size()));
+ std::map<std::string, std::string> ::const_iterator _iter106;
+ for (_iter106 = this->strategy_options.begin(); _iter106 != this->strategy_options.end(); ++_iter106)
+ {
+ xfer += oprot->writeString(_iter106->first);
+ xfer += oprot->writeString(_iter106->second);
+ }
+ xfer += oprot->writeMapEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.replication_factor) {
+ xfer += oprot->writeFieldBegin("replication_factor", ::apache::thrift::protocol::T_I32, 4);
+ xfer += oprot->writeI32(this->replication_factor);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldBegin("cf_defs", ::apache::thrift::protocol::T_LIST, 5);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->cf_defs.size()));
+ std::vector<CfDef> ::const_iterator _iter107;
+ for (_iter107 = this->cf_defs.begin(); _iter107 != this->cf_defs.end(); ++_iter107)
+ {
+ xfer += (*_iter107).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ if (this->__isset.durable_writes) {
+ xfer += oprot->writeFieldBegin("durable_writes", ::apache::thrift::protocol::T_BOOL, 6);
+ xfer += oprot->writeBool(this->durable_writes);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* CqlRow::ascii_fingerprint = "470EFC558004E98D92D604898305C04E";
+const uint8_t CqlRow::binary_fingerprint[16] = {0x47,0x0E,0xFC,0x55,0x80,0x04,0xE9,0x8D,0x92,0xD6,0x04,0x89,0x83,0x05,0xC0,0x4E};
+
+uint32_t CqlRow::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_key = false;
+ bool isset_columns = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readBinary(this->key);
+ isset_key = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->columns.clear();
+ uint32_t _size108;
+ ::apache::thrift::protocol::TType _etype111;
+ iprot->readListBegin(_etype111, _size108);
+ this->columns.resize(_size108);
+ uint32_t _i112;
+ for (_i112 = 0; _i112 < _size108; ++_i112)
+ {
+ xfer += this->columns[_i112].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ isset_columns = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_key)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_columns)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t CqlRow::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("CqlRow");
+ xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+ xfer += oprot->writeBinary(this->key);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("columns", ::apache::thrift::protocol::T_LIST, 2);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->columns.size()));
+ std::vector<Column> ::const_iterator _iter113;
+ for (_iter113 = this->columns.begin(); _iter113 != this->columns.end(); ++_iter113)
+ {
+ xfer += (*_iter113).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* CqlMetadata::ascii_fingerprint = "B7C5A4AA9652C744A48EBC1C12D531E7";
+const uint8_t CqlMetadata::binary_fingerprint[16] = {0xB7,0xC5,0xA4,0xAA,0x96,0x52,0xC7,0x44,0xA4,0x8E,0xBC,0x1C,0x12,0xD5,0x31,0xE7};
+
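+// CqlMetadata describes a CQL result set: per-column name and value
+// comparator classes plus defaults. Note the asymmetric map handling below:
+// keys are read with readBinary() (column names are raw bytes) while values
+// use readString(), matching a map<binary, string> declaration in the IDL.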
+uint32_t CqlMetadata::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_name_types = false;
+ bool isset_value_types = false;
+ bool isset_default_name_type = false;
+ bool isset_default_value_type = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_MAP) {
+ {
+ this->name_types.clear();
+ uint32_t _size114;
+ ::apache::thrift::protocol::TType _ktype115;
+ ::apache::thrift::protocol::TType _vtype116;
+ iprot->readMapBegin(_ktype115, _vtype116, _size114);
+ uint32_t _i118;
+ for (_i118 = 0; _i118 < _size114; ++_i118)
+ {
+ std::string _key119;
+ xfer += iprot->readBinary(_key119);
+ std::string& _val120 = this->name_types[_key119];
+ xfer += iprot->readString(_val120);
+ }
+ iprot->readMapEnd();
+ }
+ isset_name_types = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_MAP) {
+ {
+ this->value_types.clear();
+ uint32_t _size121;
+ ::apache::thrift::protocol::TType _ktype122;
+ ::apache::thrift::protocol::TType _vtype123;
+ iprot->readMapBegin(_ktype122, _vtype123, _size121);
+ uint32_t _i125;
+ for (_i125 = 0; _i125 < _size121; ++_i125)
+ {
+ std::string _key126;
+ xfer += iprot->readBinary(_key126);
+ std::string& _val127 = this->value_types[_key126];
+ xfer += iprot->readString(_val127);
+ }
+ iprot->readMapEnd();
+ }
+ isset_value_types = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->default_name_type);
+ isset_default_name_type = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_STRING) {
+ xfer += iprot->readString(this->default_value_type);
+ isset_default_value_type = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_name_types)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_value_types)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_default_name_type)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_default_value_type)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t CqlMetadata::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("CqlMetadata");
+ xfer += oprot->writeFieldBegin("name_types", ::apache::thrift::protocol::T_MAP, 1);
+ {
+ xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->name_types.size()));
+ std::map<std::string, std::string> ::const_iterator _iter128;
+ for (_iter128 = this->name_types.begin(); _iter128 != this->name_types.end(); ++_iter128)
+ {
+ xfer += oprot->writeBinary(_iter128->first);
+ xfer += oprot->writeString(_iter128->second);
+ }
+ xfer += oprot->writeMapEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("value_types", ::apache::thrift::protocol::T_MAP, 2);
+ {
+ xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->value_types.size()));
+ std::map<std::string, std::string> ::const_iterator _iter129;
+ for (_iter129 = this->value_types.begin(); _iter129 != this->value_types.end(); ++_iter129)
+ {
+ xfer += oprot->writeBinary(_iter129->first);
+ xfer += oprot->writeString(_iter129->second);
+ }
+ xfer += oprot->writeMapEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("default_name_type", ::apache::thrift::protocol::T_STRING, 3);
+ xfer += oprot->writeString(this->default_name_type);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("default_value_type", ::apache::thrift::protocol::T_STRING, 4);
+ xfer += oprot->writeString(this->default_value_type);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* CqlResult::ascii_fingerprint = "521B9CE5AF77539F7267F6952B609E81";
+const uint8_t CqlResult::binary_fingerprint[16] = {0x52,0x1B,0x9C,0xE5,0xAF,0x77,0x53,0x9F,0x72,0x67,0xF6,0x95,0x2B,0x60,0x9E,0x81};
+
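+// Only `type` is required; which optional members are meaningful depends on
+// it: ROWS results carry rows (and schema), INT results carry num, VOID
+// results carry nothing further. Callers should branch on type before
+// touching the optionals -- a sketch with hypothetical handlers:
+//
+//   CqlResult res;
+//   res.read(iprot);
+//   if (res.type == CqlResultType::ROWS)
+//     handle_rows(res.rows);
+//   else if (res.type == CqlResultType::INT)
+//     handle_count(res.num);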
+uint32_t CqlResult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_type = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ int32_t ecast130;
+ xfer += iprot->readI32(ecast130);
+ this->type = (CqlResultType::type)ecast130;
+ isset_type = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->rows.clear();
+ uint32_t _size131;
+ ::apache::thrift::protocol::TType _etype134;
+ iprot->readListBegin(_etype134, _size131);
+ this->rows.resize(_size131);
+ uint32_t _i135;
+ for (_i135 = 0; _i135 < _size131; ++_i135)
+ {
+ xfer += this->rows[_i135].read(iprot);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.rows = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->num);
+ this->__isset.num = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+ xfer += this->schema.read(iprot);
+ this->__isset.schema = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_type)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t CqlResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("CqlResult");
+ xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_I32, 1);
+ xfer += oprot->writeI32((int32_t)this->type);
+ xfer += oprot->writeFieldEnd();
+ if (this->__isset.rows) {
+ xfer += oprot->writeFieldBegin("rows", ::apache::thrift::protocol::T_LIST, 2);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->rows.size()));
+ std::vector<CqlRow> ::const_iterator _iter136;
+ for (_iter136 = this->rows.begin(); _iter136 != this->rows.end(); ++_iter136)
+ {
+ xfer += (*_iter136).write(oprot);
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.num) {
+ xfer += oprot->writeFieldBegin("num", ::apache::thrift::protocol::T_I32, 3);
+ xfer += oprot->writeI32(this->num);
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.schema) {
+ xfer += oprot->writeFieldBegin("schema", ::apache::thrift::protocol::T_STRUCT, 4);
+ xfer += this->schema.write(oprot);
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+const char* CqlPreparedResult::ascii_fingerprint = "7E1663EC688DFDC28722BF36F9F64E6F";
+const uint8_t CqlPreparedResult::binary_fingerprint[16] = {0x7E,0x16,0x63,0xEC,0x68,0x8D,0xFD,0xC2,0x87,0x22,0xBF,0x36,0xF9,0xF6,0x4E,0x6F};
+
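+// CqlPreparedResult is the server's answer to preparing a CQL statement:
+// itemId is the opaque handle later passed back when executing the prepared
+// statement, count is the number of bind variables, and the two optional
+// lists describe those variables' types and names when the server supplies
+// them.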
+uint32_t CqlPreparedResult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+ uint32_t xfer = 0;
+ std::string fname;
+ ::apache::thrift::protocol::TType ftype;
+ int16_t fid;
+
+ xfer += iprot->readStructBegin(fname);
+
+ using ::apache::thrift::protocol::TProtocolException;
+
+ bool isset_itemId = false;
+ bool isset_count = false;
+
+ while (true)
+ {
+ xfer += iprot->readFieldBegin(fname, ftype, fid);
+ if (ftype == ::apache::thrift::protocol::T_STOP) {
+ break;
+ }
+ switch (fid)
+ {
+ case 1:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->itemId);
+ isset_itemId = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 2:
+ if (ftype == ::apache::thrift::protocol::T_I32) {
+ xfer += iprot->readI32(this->count);
+ isset_count = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 3:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->variable_types.clear();
+ uint32_t _size137;
+ ::apache::thrift::protocol::TType _etype140;
+ iprot->readListBegin(_etype140, _size137);
+ this->variable_types.resize(_size137);
+ uint32_t _i141;
+ for (_i141 = 0; _i141 < _size137; ++_i141)
+ {
+ xfer += iprot->readString(this->variable_types[_i141]);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.variable_types = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ case 4:
+ if (ftype == ::apache::thrift::protocol::T_LIST) {
+ {
+ this->variable_names.clear();
+ uint32_t _size142;
+ ::apache::thrift::protocol::TType _etype145;
+ iprot->readListBegin(_etype145, _size142);
+ this->variable_names.resize(_size142);
+ uint32_t _i146;
+ for (_i146 = 0; _i146 < _size142; ++_i146)
+ {
+ xfer += iprot->readString(this->variable_names[_i146]);
+ }
+ iprot->readListEnd();
+ }
+ this->__isset.variable_names = true;
+ } else {
+ xfer += iprot->skip(ftype);
+ }
+ break;
+ default:
+ xfer += iprot->skip(ftype);
+ break;
+ }
+ xfer += iprot->readFieldEnd();
+ }
+
+ xfer += iprot->readStructEnd();
+
+ if (!isset_itemId)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ if (!isset_count)
+ throw TProtocolException(TProtocolException::INVALID_DATA);
+ return xfer;
+}
+
+uint32_t CqlPreparedResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
+ uint32_t xfer = 0;
+ xfer += oprot->writeStructBegin("CqlPreparedResult");
+ xfer += oprot->writeFieldBegin("itemId", ::apache::thrift::protocol::T_I32, 1);
+ xfer += oprot->writeI32(this->itemId);
+ xfer += oprot->writeFieldEnd();
+ xfer += oprot->writeFieldBegin("count", ::apache::thrift::protocol::T_I32, 2);
+ xfer += oprot->writeI32(this->count);
+ xfer += oprot->writeFieldEnd();
+ if (this->__isset.variable_types) {
+ xfer += oprot->writeFieldBegin("variable_types", ::apache::thrift::protocol::T_LIST, 3);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->variable_types.size()));
+ std::vector<std::string> ::const_iterator _iter147;
+ for (_iter147 = this->variable_types.begin(); _iter147 != this->variable_types.end(); ++_iter147)
+ {
+ xfer += oprot->writeString((*_iter147));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ }
+ if (this->__isset.variable_names) {
+ xfer += oprot->writeFieldBegin("variable_names", ::apache::thrift::protocol::T_LIST, 4);
+ {
+ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->variable_names.size()));
+ std::vector<std::string> ::const_iterator _iter148;
+ for (_iter148 = this->variable_names.begin(); _iter148 != this->variable_names.end(); ++_iter148)
+ {
+ xfer += oprot->writeString((*_iter148));
+ }
+ xfer += oprot->writeListEnd();
+ }
+ xfer += oprot->writeFieldEnd();
+ }
+ xfer += oprot->writeFieldStop();
+ xfer += oprot->writeStructEnd();
+ return xfer;
+}
+
+}}} // namespace
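
Taken together, the read()/write() pairs above let any of these structs round-trip through a Thrift TProtocol. A minimal sketch of that contract, using a TMemoryBuffer and TBinaryProtocol and the old-style Thrift 0.8 header paths this file already uses; nothing below is part of the generated sources:

#include <boost/shared_ptr.hpp>
#include <protocol/TBinaryProtocol.h>
#include <transport/TBufferTransports.h>
#include "cassandra_types.h"

using namespace org::apache::cassandra;

int main() {
  boost::shared_ptr<apache::thrift::transport::TMemoryBuffer>
      buf(new apache::thrift::transport::TMemoryBuffer());
  apache::thrift::protocol::TBinaryProtocol proto(buf);

  CqlPreparedResult in;
  in.__set_itemId(42);                        // required: serialized unconditionally
  in.__set_count(1);
  in.__set_variable_types(
      std::vector<std::string>(1, "UTF8Type")); // optional: flips __isset.variable_types

  in.write(&proto);                           // encode into the memory buffer

  CqlPreparedResult out;
  out.read(&proto);                           // decode; throws TProtocolException
                                              // (INVALID_DATA) if itemId/count missing
  return out == in ? 0 : 1;                   // 0: operator== honors the __isset flags
}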
diff --git a/storage/cassandra/gen-cpp/cassandra_types.h b/storage/cassandra/gen-cpp/cassandra_types.h
new file mode 100644
index 00000000000..d675198dcc8
--- /dev/null
+++ b/storage/cassandra/gen-cpp/cassandra_types.h
@@ -0,0 +1,2149 @@
+/**
+ * Autogenerated by Thrift Compiler (0.8.0)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+#ifndef cassandra_TYPES_H
+#define cassandra_TYPES_H
+
+#include <inttypes.h>
+#include <netinet/in.h>
+
+#include <Thrift.h>
+#include <TApplicationException.h>
+#include <protocol/TProtocol.h>
+#include <transport/TTransport.h>
+
+
+
+namespace org { namespace apache { namespace cassandra {
+
+struct ConsistencyLevel {
+ enum type {
+ ONE = 1,
+ QUORUM = 2,
+ LOCAL_QUORUM = 3,
+ EACH_QUORUM = 4,
+ ALL = 5,
+ ANY = 6,
+ TWO = 7,
+ THREE = 8
+ };
+};
+
+extern const std::map<int, const char*> _ConsistencyLevel_VALUES_TO_NAMES;
+
+struct IndexOperator {
+ enum type {
+ EQ = 0,
+ GTE = 1,
+ GT = 2,
+ LTE = 3,
+ LT = 4
+ };
+};
+
+extern const std::map<int, const char*> _IndexOperator_VALUES_TO_NAMES;
+
+struct IndexType {
+ enum type {
+ KEYS = 0,
+ CUSTOM = 1
+ };
+};
+
+extern const std::map<int, const char*> _IndexType_VALUES_TO_NAMES;
+
+struct Compression {
+ enum type {
+ GZIP = 1,
+ NONE = 2
+ };
+};
+
+extern const std::map<int, const char*> _Compression_VALUES_TO_NAMES;
+
+struct CqlResultType {
+ enum type {
+ ROWS = 1,
+ VOID = 2,
+ INT = 3
+ };
+};
+
+extern const std::map<int, const char*> _CqlResultType_VALUES_TO_NAMES;
+
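Each enum struct above is paired with an extern _*_VALUES_TO_NAMES map (defined in cassandra_types.cpp) from wire values to printable names. A lookup sketch, assuming we are inside namespace org::apache::cassandra with this header included; the helper name is hypothetical:

const char* consistency_name(ConsistencyLevel::type lvl) {
  std::map<int, const char*>::const_iterator it =
      _ConsistencyLevel_VALUES_TO_NAMES.find(lvl);
  return it == _ConsistencyLevel_VALUES_TO_NAMES.end() ? "UNKNOWN" : it->second;
}
// consistency_name(ConsistencyLevel::QUORUM) -> "QUORUM"
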
+typedef struct _Column__isset {
+ _Column__isset() : value(false), timestamp(false), ttl(false) {}
+ bool value;
+ bool timestamp;
+ bool ttl;
+} _Column__isset;
+
+class Column {
+ public:
+
+ static const char* ascii_fingerprint; // = "3EE0E1C5C844001B62F08125068292CC";
+ static const uint8_t binary_fingerprint[16]; // = {0x3E,0xE0,0xE1,0xC5,0xC8,0x44,0x00,0x1B,0x62,0xF0,0x81,0x25,0x06,0x82,0x92,0xCC};
+
+ Column() : name(""), value(""), timestamp(0), ttl(0) {
+ }
+
+ virtual ~Column() throw() {}
+
+ std::string name;
+ std::string value;
+ int64_t timestamp;
+ int32_t ttl;
+
+ _Column__isset __isset;
+
+ void __set_name(const std::string& val) {
+ name = val;
+ }
+
+ void __set_value(const std::string& val) {
+ value = val;
+ __isset.value = true;
+ }
+
+ void __set_timestamp(const int64_t val) {
+ timestamp = val;
+ __isset.timestamp = true;
+ }
+
+ void __set_ttl(const int32_t val) {
+ ttl = val;
+ __isset.ttl = true;
+ }
+
+ bool operator == (const Column & rhs) const
+ {
+ if (!(name == rhs.name))
+ return false;
+ if (__isset.value != rhs.__isset.value)
+ return false;
+ else if (__isset.value && !(value == rhs.value))
+ return false;
+ if (__isset.timestamp != rhs.__isset.timestamp)
+ return false;
+ else if (__isset.timestamp && !(timestamp == rhs.timestamp))
+ return false;
+ if (__isset.ttl != rhs.__isset.ttl)
+ return false;
+ else if (__isset.ttl && !(ttl == rhs.ttl))
+ return false;
+ return true;
+ }
+ bool operator != (const Column &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Column & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
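The __isset flags plus the __set_* helpers are the whole optionality contract: write() serializes an optional field only when its flag is raised (see the __isset.rows / __isset.num guards in CqlResult::write above), and operator== ignores unflagged fields. A sketch against the Column class above; the function is hypothetical, for illustration only:

void isset_contract_sketch() {
  Column c;
  c.__set_name("age");              // 'name' is required; no flag involved
  c.value = "30";                   // raw assignment: write() still skips it...
  c.__isset.value = true;           // ...until the flag is raised by hand
  c.__set_ttl(3600);                // the setter raises __isset.ttl itself
}
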
+
+class SuperColumn {
+ public:
+
+ static const char* ascii_fingerprint; // = "470EFC558004E98D92D604898305C04E";
+ static const uint8_t binary_fingerprint[16]; // = {0x47,0x0E,0xFC,0x55,0x80,0x04,0xE9,0x8D,0x92,0xD6,0x04,0x89,0x83,0x05,0xC0,0x4E};
+
+ SuperColumn() : name("") {
+ }
+
+ virtual ~SuperColumn() throw() {}
+
+ std::string name;
+ std::vector<Column> columns;
+
+ void __set_name(const std::string& val) {
+ name = val;
+ }
+
+ void __set_columns(const std::vector<Column> & val) {
+ columns = val;
+ }
+
+ bool operator == (const SuperColumn & rhs) const
+ {
+ if (!(name == rhs.name))
+ return false;
+ if (!(columns == rhs.columns))
+ return false;
+ return true;
+ }
+ bool operator != (const SuperColumn &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const SuperColumn & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class CounterColumn {
+ public:
+
+ static const char* ascii_fingerprint; // = "1CCCF6FC31CFD1D61BBBB1BAF3590620";
+ static const uint8_t binary_fingerprint[16]; // = {0x1C,0xCC,0xF6,0xFC,0x31,0xCF,0xD1,0xD6,0x1B,0xBB,0xB1,0xBA,0xF3,0x59,0x06,0x20};
+
+ CounterColumn() : name(""), value(0) {
+ }
+
+ virtual ~CounterColumn() throw() {}
+
+ std::string name;
+ int64_t value;
+
+ void __set_name(const std::string& val) {
+ name = val;
+ }
+
+ void __set_value(const int64_t val) {
+ value = val;
+ }
+
+ bool operator == (const CounterColumn & rhs) const
+ {
+ if (!(name == rhs.name))
+ return false;
+ if (!(value == rhs.value))
+ return false;
+ return true;
+ }
+ bool operator != (const CounterColumn &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const CounterColumn & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class CounterSuperColumn {
+ public:
+
+ static const char* ascii_fingerprint; // = "CD4C8C4BF7753E46DE417CDE369343A4";
+ static const uint8_t binary_fingerprint[16]; // = {0xCD,0x4C,0x8C,0x4B,0xF7,0x75,0x3E,0x46,0xDE,0x41,0x7C,0xDE,0x36,0x93,0x43,0xA4};
+
+ CounterSuperColumn() : name("") {
+ }
+
+ virtual ~CounterSuperColumn() throw() {}
+
+ std::string name;
+ std::vector<CounterColumn> columns;
+
+ void __set_name(const std::string& val) {
+ name = val;
+ }
+
+ void __set_columns(const std::vector<CounterColumn> & val) {
+ columns = val;
+ }
+
+ bool operator == (const CounterSuperColumn & rhs) const
+ {
+ if (!(name == rhs.name))
+ return false;
+ if (!(columns == rhs.columns))
+ return false;
+ return true;
+ }
+ bool operator != (const CounterSuperColumn &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const CounterSuperColumn & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ColumnOrSuperColumn__isset {
+ _ColumnOrSuperColumn__isset() : column(false), super_column(false), counter_column(false), counter_super_column(false) {}
+ bool column;
+ bool super_column;
+ bool counter_column;
+ bool counter_super_column;
+} _ColumnOrSuperColumn__isset;
+
+class ColumnOrSuperColumn {
+ public:
+
+ static const char* ascii_fingerprint; // = "2B34AC9E80F1DAA3A2A63B1AB1841E61";
+ static const uint8_t binary_fingerprint[16]; // = {0x2B,0x34,0xAC,0x9E,0x80,0xF1,0xDA,0xA3,0xA2,0xA6,0x3B,0x1A,0xB1,0x84,0x1E,0x61};
+
+ ColumnOrSuperColumn() {
+ }
+
+ virtual ~ColumnOrSuperColumn() throw() {}
+
+ Column column;
+ SuperColumn super_column;
+ CounterColumn counter_column;
+ CounterSuperColumn counter_super_column;
+
+ _ColumnOrSuperColumn__isset __isset;
+
+ void __set_column(const Column& val) {
+ column = val;
+ __isset.column = true;
+ }
+
+ void __set_super_column(const SuperColumn& val) {
+ super_column = val;
+ __isset.super_column = true;
+ }
+
+ void __set_counter_column(const CounterColumn& val) {
+ counter_column = val;
+ __isset.counter_column = true;
+ }
+
+ void __set_counter_super_column(const CounterSuperColumn& val) {
+ counter_super_column = val;
+ __isset.counter_super_column = true;
+ }
+
+ bool operator == (const ColumnOrSuperColumn & rhs) const
+ {
+ if (__isset.column != rhs.__isset.column)
+ return false;
+ else if (__isset.column && !(column == rhs.column))
+ return false;
+ if (__isset.super_column != rhs.__isset.super_column)
+ return false;
+ else if (__isset.super_column && !(super_column == rhs.super_column))
+ return false;
+ if (__isset.counter_column != rhs.__isset.counter_column)
+ return false;
+ else if (__isset.counter_column && !(counter_column == rhs.counter_column))
+ return false;
+ if (__isset.counter_super_column != rhs.__isset.counter_super_column)
+ return false;
+ else if (__isset.counter_super_column && !(counter_super_column == rhs.counter_super_column))
+ return false;
+ return true;
+ }
+ bool operator != (const ColumnOrSuperColumn &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ColumnOrSuperColumn & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class NotFoundException : public ::apache::thrift::TException {
+ public:
+
+ static const char* ascii_fingerprint; // = "99914B932BD37A50B983C5E7C90AE93B";
+ static const uint8_t binary_fingerprint[16]; // = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B};
+
+ NotFoundException() {
+ }
+
+ virtual ~NotFoundException() throw() {}
+
+
+ bool operator == (const NotFoundException & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const NotFoundException &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const NotFoundException & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class InvalidRequestException : public ::apache::thrift::TException {
+ public:
+
+ static const char* ascii_fingerprint; // = "EFB929595D312AC8F305D5A794CFEDA1";
+ static const uint8_t binary_fingerprint[16]; // = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
+
+ InvalidRequestException() : why("") {
+ }
+
+ virtual ~InvalidRequestException() throw() {}
+
+ std::string why;
+
+ void __set_why(const std::string& val) {
+ why = val;
+ }
+
+ bool operator == (const InvalidRequestException & rhs) const
+ {
+ if (!(why == rhs.why))
+ return false;
+ return true;
+ }
+ bool operator != (const InvalidRequestException &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const InvalidRequestException & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class UnavailableException : public ::apache::thrift::TException {
+ public:
+
+ static const char* ascii_fingerprint; // = "99914B932BD37A50B983C5E7C90AE93B";
+ static const uint8_t binary_fingerprint[16]; // = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B};
+
+ UnavailableException() {
+ }
+
+ virtual ~UnavailableException() throw() {}
+
+
+ bool operator == (const UnavailableException & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const UnavailableException &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const UnavailableException & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class TimedOutException : public ::apache::thrift::TException {
+ public:
+
+ static const char* ascii_fingerprint; // = "99914B932BD37A50B983C5E7C90AE93B";
+ static const uint8_t binary_fingerprint[16]; // = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B};
+
+ TimedOutException() {
+ }
+
+ virtual ~TimedOutException() throw() {}
+
+
+ bool operator == (const TimedOutException & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const TimedOutException &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const TimedOutException & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class AuthenticationException : public ::apache::thrift::TException {
+ public:
+
+ static const char* ascii_fingerprint; // = "EFB929595D312AC8F305D5A794CFEDA1";
+ static const uint8_t binary_fingerprint[16]; // = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
+
+ AuthenticationException() : why("") {
+ }
+
+ virtual ~AuthenticationException() throw() {}
+
+ std::string why;
+
+ void __set_why(const std::string& val) {
+ why = val;
+ }
+
+ bool operator == (const AuthenticationException & rhs) const
+ {
+ if (!(why == rhs.why))
+ return false;
+ return true;
+ }
+ bool operator != (const AuthenticationException &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const AuthenticationException & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class AuthorizationException : public ::apache::thrift::TException {
+ public:
+
+ static const char* ascii_fingerprint; // = "EFB929595D312AC8F305D5A794CFEDA1";
+ static const uint8_t binary_fingerprint[16]; // = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
+
+ AuthorizationException() : why("") {
+ }
+
+ virtual ~AuthorizationException() throw() {}
+
+ std::string why;
+
+ void __set_why(const std::string& val) {
+ why = val;
+ }
+
+ bool operator == (const AuthorizationException & rhs) const
+ {
+ if (!(why == rhs.why))
+ return false;
+ return true;
+ }
+ bool operator != (const AuthorizationException &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const AuthorizationException & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class SchemaDisagreementException : public ::apache::thrift::TException {
+ public:
+
+ static const char* ascii_fingerprint; // = "99914B932BD37A50B983C5E7C90AE93B";
+ static const uint8_t binary_fingerprint[16]; // = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B};
+
+ SchemaDisagreementException() {
+ }
+
+ virtual ~SchemaDisagreementException() throw() {}
+
+
+ bool operator == (const SchemaDisagreementException & /* rhs */) const
+ {
+ return true;
+ }
+ bool operator != (const SchemaDisagreementException &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const SchemaDisagreementException & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ColumnParent__isset {
+ _ColumnParent__isset() : super_column(false) {}
+ bool super_column;
+} _ColumnParent__isset;
+
+class ColumnParent {
+ public:
+
+ static const char* ascii_fingerprint; // = "0A13AE61181713A4100DFFB3EC293822";
+ static const uint8_t binary_fingerprint[16]; // = {0x0A,0x13,0xAE,0x61,0x18,0x17,0x13,0xA4,0x10,0x0D,0xFF,0xB3,0xEC,0x29,0x38,0x22};
+
+ ColumnParent() : column_family(""), super_column("") {
+ }
+
+ virtual ~ColumnParent() throw() {}
+
+ std::string column_family;
+ std::string super_column;
+
+ _ColumnParent__isset __isset;
+
+ void __set_column_family(const std::string& val) {
+ column_family = val;
+ }
+
+ void __set_super_column(const std::string& val) {
+ super_column = val;
+ __isset.super_column = true;
+ }
+
+ bool operator == (const ColumnParent & rhs) const
+ {
+ if (!(column_family == rhs.column_family))
+ return false;
+ if (__isset.super_column != rhs.__isset.super_column)
+ return false;
+ else if (__isset.super_column && !(super_column == rhs.super_column))
+ return false;
+ return true;
+ }
+ bool operator != (const ColumnParent &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ColumnParent & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ColumnPath__isset {
+ _ColumnPath__isset() : super_column(false), column(false) {}
+ bool super_column;
+ bool column;
+} _ColumnPath__isset;
+
+class ColumnPath {
+ public:
+
+ static const char* ascii_fingerprint; // = "606212895BCF63C757913CF35AEB3462";
+ static const uint8_t binary_fingerprint[16]; // = {0x60,0x62,0x12,0x89,0x5B,0xCF,0x63,0xC7,0x57,0x91,0x3C,0xF3,0x5A,0xEB,0x34,0x62};
+
+ ColumnPath() : column_family(""), super_column(""), column("") {
+ }
+
+ virtual ~ColumnPath() throw() {}
+
+ std::string column_family;
+ std::string super_column;
+ std::string column;
+
+ _ColumnPath__isset __isset;
+
+ void __set_column_family(const std::string& val) {
+ column_family = val;
+ }
+
+ void __set_super_column(const std::string& val) {
+ super_column = val;
+ __isset.super_column = true;
+ }
+
+ void __set_column(const std::string& val) {
+ column = val;
+ __isset.column = true;
+ }
+
+ bool operator == (const ColumnPath & rhs) const
+ {
+ if (!(column_family == rhs.column_family))
+ return false;
+ if (__isset.super_column != rhs.__isset.super_column)
+ return false;
+ else if (__isset.super_column && !(super_column == rhs.super_column))
+ return false;
+ if (__isset.column != rhs.__isset.column)
+ return false;
+ else if (__isset.column && !(column == rhs.column))
+ return false;
+ return true;
+ }
+ bool operator != (const ColumnPath &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ColumnPath & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class SliceRange {
+ public:
+
+ static const char* ascii_fingerprint; // = "184D24C9A0B8D4415E234DB649CAE740";
+ static const uint8_t binary_fingerprint[16]; // = {0x18,0x4D,0x24,0xC9,0xA0,0xB8,0xD4,0x41,0x5E,0x23,0x4D,0xB6,0x49,0xCA,0xE7,0x40};
+
+ SliceRange() : start(""), finish(""), reversed(false), count(100) {
+ }
+
+ virtual ~SliceRange() throw() {}
+
+ std::string start;
+ std::string finish;
+ bool reversed;
+ int32_t count;
+
+ void __set_start(const std::string& val) {
+ start = val;
+ }
+
+ void __set_finish(const std::string& val) {
+ finish = val;
+ }
+
+ void __set_reversed(const bool val) {
+ reversed = val;
+ }
+
+ void __set_count(const int32_t val) {
+ count = val;
+ }
+
+ bool operator == (const SliceRange & rhs) const
+ {
+ if (!(start == rhs.start))
+ return false;
+ if (!(finish == rhs.finish))
+ return false;
+ if (!(reversed == rhs.reversed))
+ return false;
+ if (!(count == rhs.count))
+ return false;
+ return true;
+ }
+ bool operator != (const SliceRange &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const SliceRange & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _SlicePredicate__isset {
+ _SlicePredicate__isset() : column_names(false), slice_range(false) {}
+ bool column_names;
+ bool slice_range;
+} _SlicePredicate__isset;
+
+class SlicePredicate {
+ public:
+
+ static const char* ascii_fingerprint; // = "F59D1D81C17DFFAF09988BF1C9CE5E27";
+ static const uint8_t binary_fingerprint[16]; // = {0xF5,0x9D,0x1D,0x81,0xC1,0x7D,0xFF,0xAF,0x09,0x98,0x8B,0xF1,0xC9,0xCE,0x5E,0x27};
+
+ SlicePredicate() {
+ }
+
+ virtual ~SlicePredicate() throw() {}
+
+ std::vector<std::string> column_names;
+ SliceRange slice_range;
+
+ _SlicePredicate__isset __isset;
+
+ void __set_column_names(const std::vector<std::string> & val) {
+ column_names = val;
+ __isset.column_names = true;
+ }
+
+ void __set_slice_range(const SliceRange& val) {
+ slice_range = val;
+ __isset.slice_range = true;
+ }
+
+ bool operator == (const SlicePredicate & rhs) const
+ {
+ if (__isset.column_names != rhs.__isset.column_names)
+ return false;
+ else if (__isset.column_names && !(column_names == rhs.column_names))
+ return false;
+ if (__isset.slice_range != rhs.__isset.slice_range)
+ return false;
+ else if (__isset.slice_range && !(slice_range == rhs.slice_range))
+ return false;
+ return true;
+ }
+ bool operator != (const SlicePredicate &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const SlicePredicate & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class IndexExpression {
+ public:
+
+ static const char* ascii_fingerprint; // = "D9F4CFE2F293A8B1052FD3031DD2C847";
+ static const uint8_t binary_fingerprint[16]; // = {0xD9,0xF4,0xCF,0xE2,0xF2,0x93,0xA8,0xB1,0x05,0x2F,0xD3,0x03,0x1D,0xD2,0xC8,0x47};
+
+ IndexExpression() : column_name(""), op((IndexOperator::type)0), value("") {
+ }
+
+ virtual ~IndexExpression() throw() {}
+
+ std::string column_name;
+ IndexOperator::type op;
+ std::string value;
+
+ void __set_column_name(const std::string& val) {
+ column_name = val;
+ }
+
+ void __set_op(const IndexOperator::type val) {
+ op = val;
+ }
+
+ void __set_value(const std::string& val) {
+ value = val;
+ }
+
+ bool operator == (const IndexExpression & rhs) const
+ {
+ if (!(column_name == rhs.column_name))
+ return false;
+ if (!(op == rhs.op))
+ return false;
+ if (!(value == rhs.value))
+ return false;
+ return true;
+ }
+ bool operator != (const IndexExpression &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const IndexExpression & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class IndexClause {
+ public:
+
+ static const char* ascii_fingerprint; // = "9B551B9AB86120B0EEA9005C77FD3C1F";
+ static const uint8_t binary_fingerprint[16]; // = {0x9B,0x55,0x1B,0x9A,0xB8,0x61,0x20,0xB0,0xEE,0xA9,0x00,0x5C,0x77,0xFD,0x3C,0x1F};
+
+ IndexClause() : start_key(""), count(100) {
+ }
+
+ virtual ~IndexClause() throw() {}
+
+ std::vector<IndexExpression> expressions;
+ std::string start_key;
+ int32_t count;
+
+ void __set_expressions(const std::vector<IndexExpression> & val) {
+ expressions = val;
+ }
+
+ void __set_start_key(const std::string& val) {
+ start_key = val;
+ }
+
+ void __set_count(const int32_t val) {
+ count = val;
+ }
+
+ bool operator == (const IndexClause & rhs) const
+ {
+ if (!(expressions == rhs.expressions))
+ return false;
+ if (!(start_key == rhs.start_key))
+ return false;
+ if (!(count == rhs.count))
+ return false;
+ return true;
+ }
+ bool operator != (const IndexClause &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const IndexClause & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _KeyRange__isset {
+ _KeyRange__isset() : start_key(false), end_key(false), start_token(false), end_token(false), row_filter(false) {}
+ bool start_key;
+ bool end_key;
+ bool start_token;
+ bool end_token;
+ bool row_filter;
+} _KeyRange__isset;
+
+class KeyRange {
+ public:
+
+ static const char* ascii_fingerprint; // = "A6EC82FA0980B91C7C8EB013C61CA1B0";
+ static const uint8_t binary_fingerprint[16]; // = {0xA6,0xEC,0x82,0xFA,0x09,0x80,0xB9,0x1C,0x7C,0x8E,0xB0,0x13,0xC6,0x1C,0xA1,0xB0};
+
+ KeyRange() : start_key(""), end_key(""), start_token(""), end_token(""), count(100) {
+ }
+
+ virtual ~KeyRange() throw() {}
+
+ std::string start_key;
+ std::string end_key;
+ std::string start_token;
+ std::string end_token;
+ std::vector<IndexExpression> row_filter;
+ int32_t count;
+
+ _KeyRange__isset __isset;
+
+ void __set_start_key(const std::string& val) {
+ start_key = val;
+ __isset.start_key = true;
+ }
+
+ void __set_end_key(const std::string& val) {
+ end_key = val;
+ __isset.end_key = true;
+ }
+
+ void __set_start_token(const std::string& val) {
+ start_token = val;
+ __isset.start_token = true;
+ }
+
+ void __set_end_token(const std::string& val) {
+ end_token = val;
+ __isset.end_token = true;
+ }
+
+ void __set_row_filter(const std::vector<IndexExpression> & val) {
+ row_filter = val;
+ __isset.row_filter = true;
+ }
+
+ void __set_count(const int32_t val) {
+ count = val;
+ }
+
+ bool operator == (const KeyRange & rhs) const
+ {
+ if (__isset.start_key != rhs.__isset.start_key)
+ return false;
+ else if (__isset.start_key && !(start_key == rhs.start_key))
+ return false;
+ if (__isset.end_key != rhs.__isset.end_key)
+ return false;
+ else if (__isset.end_key && !(end_key == rhs.end_key))
+ return false;
+ if (__isset.start_token != rhs.__isset.start_token)
+ return false;
+ else if (__isset.start_token && !(start_token == rhs.start_token))
+ return false;
+ if (__isset.end_token != rhs.__isset.end_token)
+ return false;
+ else if (__isset.end_token && !(end_token == rhs.end_token))
+ return false;
+ if (__isset.row_filter != rhs.__isset.row_filter)
+ return false;
+ else if (__isset.row_filter && !(row_filter == rhs.row_filter))
+ return false;
+ if (!(count == rhs.count))
+ return false;
+ return true;
+ }
+ bool operator != (const KeyRange &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const KeyRange & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class KeySlice {
+ public:
+
+ static const char* ascii_fingerprint; // = "D1568675B0C135C909E3169B72A4DA3D";
+ static const uint8_t binary_fingerprint[16]; // = {0xD1,0x56,0x86,0x75,0xB0,0xC1,0x35,0xC9,0x09,0xE3,0x16,0x9B,0x72,0xA4,0xDA,0x3D};
+
+ KeySlice() : key("") {
+ }
+
+ virtual ~KeySlice() throw() {}
+
+ std::string key;
+ std::vector<ColumnOrSuperColumn> columns;
+
+ void __set_key(const std::string& val) {
+ key = val;
+ }
+
+ void __set_columns(const std::vector<ColumnOrSuperColumn> & val) {
+ columns = val;
+ }
+
+ bool operator == (const KeySlice & rhs) const
+ {
+ if (!(key == rhs.key))
+ return false;
+ if (!(columns == rhs.columns))
+ return false;
+ return true;
+ }
+ bool operator != (const KeySlice &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const KeySlice & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class KeyCount {
+ public:
+
+ static const char* ascii_fingerprint; // = "EEBC915CE44901401D881E6091423036";
+ static const uint8_t binary_fingerprint[16]; // = {0xEE,0xBC,0x91,0x5C,0xE4,0x49,0x01,0x40,0x1D,0x88,0x1E,0x60,0x91,0x42,0x30,0x36};
+
+ KeyCount() : key(""), count(0) {
+ }
+
+ virtual ~KeyCount() throw() {}
+
+ std::string key;
+ int32_t count;
+
+ void __set_key(const std::string& val) {
+ key = val;
+ }
+
+ void __set_count(const int32_t val) {
+ count = val;
+ }
+
+ bool operator == (const KeyCount & rhs) const
+ {
+ if (!(key == rhs.key))
+ return false;
+ if (!(count == rhs.count))
+ return false;
+ return true;
+ }
+ bool operator != (const KeyCount &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const KeyCount & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Deletion__isset {
+ _Deletion__isset() : timestamp(false), super_column(false), predicate(false) {}
+ bool timestamp;
+ bool super_column;
+ bool predicate;
+} _Deletion__isset;
+
+class Deletion {
+ public:
+
+ static const char* ascii_fingerprint; // = "40F33ECF1C932CA77C2414C4E6C60CBE";
+ static const uint8_t binary_fingerprint[16]; // = {0x40,0xF3,0x3E,0xCF,0x1C,0x93,0x2C,0xA7,0x7C,0x24,0x14,0xC4,0xE6,0xC6,0x0C,0xBE};
+
+ Deletion() : timestamp(0), super_column("") {
+ }
+
+ virtual ~Deletion() throw() {}
+
+ int64_t timestamp;
+ std::string super_column;
+ SlicePredicate predicate;
+
+ _Deletion__isset __isset;
+
+ void __set_timestamp(const int64_t val) {
+ timestamp = val;
+ __isset.timestamp = true;
+ }
+
+ void __set_super_column(const std::string& val) {
+ super_column = val;
+ __isset.super_column = true;
+ }
+
+ void __set_predicate(const SlicePredicate& val) {
+ predicate = val;
+ __isset.predicate = true;
+ }
+
+ bool operator == (const Deletion & rhs) const
+ {
+ if (__isset.timestamp != rhs.__isset.timestamp)
+ return false;
+ else if (__isset.timestamp && !(timestamp == rhs.timestamp))
+ return false;
+ if (__isset.super_column != rhs.__isset.super_column)
+ return false;
+ else if (__isset.super_column && !(super_column == rhs.super_column))
+ return false;
+ if (__isset.predicate != rhs.__isset.predicate)
+ return false;
+ else if (__isset.predicate && !(predicate == rhs.predicate))
+ return false;
+ return true;
+ }
+ bool operator != (const Deletion &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Deletion & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _Mutation__isset {
+ _Mutation__isset() : column_or_supercolumn(false), deletion(false) {}
+ bool column_or_supercolumn;
+ bool deletion;
+} _Mutation__isset;
+
+class Mutation {
+ public:
+
+ static const char* ascii_fingerprint; // = "E8B65DF3979C6868F80DF81F8E769E63";
+ static const uint8_t binary_fingerprint[16]; // = {0xE8,0xB6,0x5D,0xF3,0x97,0x9C,0x68,0x68,0xF8,0x0D,0xF8,0x1F,0x8E,0x76,0x9E,0x63};
+
+ Mutation() {
+ }
+
+ virtual ~Mutation() throw() {}
+
+ ColumnOrSuperColumn column_or_supercolumn;
+ Deletion deletion;
+
+ _Mutation__isset __isset;
+
+ void __set_column_or_supercolumn(const ColumnOrSuperColumn& val) {
+ column_or_supercolumn = val;
+ __isset.column_or_supercolumn = true;
+ }
+
+ void __set_deletion(const Deletion& val) {
+ deletion = val;
+ __isset.deletion = true;
+ }
+
+ bool operator == (const Mutation & rhs) const
+ {
+ if (__isset.column_or_supercolumn != rhs.__isset.column_or_supercolumn)
+ return false;
+ else if (__isset.column_or_supercolumn && !(column_or_supercolumn == rhs.column_or_supercolumn))
+ return false;
+ if (__isset.deletion != rhs.__isset.deletion)
+ return false;
+ else if (__isset.deletion && !(deletion == rhs.deletion))
+ return false;
+ return true;
+ }
+ bool operator != (const Mutation &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const Mutation & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
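Mutation behaves like a two-way union: callers set either column_or_supercolumn (an insert/update) or deletion, never both. A sketch that builds a single-column deletion from the types above; the helper is hypothetical and the timestamp value is caller-chosen (Cassandra conventionally uses microseconds):

Mutation make_delete_col1_sketch(int64_t ts_usec) {
  SlicePredicate pred;
  pred.__set_column_names(std::vector<std::string>(1, "col1"));

  Deletion del;
  del.__set_timestamp(ts_usec);
  del.__set_predicate(pred);

  Mutation m;
  m.__set_deletion(del);            // __isset.deletion = true; the insert side stays unset
  return m;
}
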
+typedef struct _EndpointDetails__isset {
+ _EndpointDetails__isset() : host(false), datacenter(false), rack(false) {}
+ bool host;
+ bool datacenter;
+ bool rack;
+} _EndpointDetails__isset;
+
+class EndpointDetails {
+ public:
+
+ static const char* ascii_fingerprint; // = "F4A50F0EC638C7F66026F9B6678FD89B";
+ static const uint8_t binary_fingerprint[16]; // = {0xF4,0xA5,0x0F,0x0E,0xC6,0x38,0xC7,0xF6,0x60,0x26,0xF9,0xB6,0x67,0x8F,0xD8,0x9B};
+
+ EndpointDetails() : host(""), datacenter(""), rack("") {
+ }
+
+ virtual ~EndpointDetails() throw() {}
+
+ std::string host;
+ std::string datacenter;
+ std::string rack;
+
+ _EndpointDetails__isset __isset;
+
+ void __set_host(const std::string& val) {
+ host = val;
+ }
+
+ void __set_datacenter(const std::string& val) {
+ datacenter = val;
+ }
+
+ void __set_rack(const std::string& val) {
+ rack = val;
+ __isset.rack = true;
+ }
+
+ bool operator == (const EndpointDetails & rhs) const
+ {
+ if (!(host == rhs.host))
+ return false;
+ if (!(datacenter == rhs.datacenter))
+ return false;
+ if (__isset.rack != rhs.__isset.rack)
+ return false;
+ else if (__isset.rack && !(rack == rhs.rack))
+ return false;
+ return true;
+ }
+ bool operator != (const EndpointDetails &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const EndpointDetails & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _TokenRange__isset {
+ _TokenRange__isset() : rpc_endpoints(false), endpoint_details(false) {}
+ bool rpc_endpoints;
+ bool endpoint_details;
+} _TokenRange__isset;
+
+class TokenRange {
+ public:
+
+ static const char* ascii_fingerprint; // = "832268DC4CD6B17EE8881FC57EA04679";
+ static const uint8_t binary_fingerprint[16]; // = {0x83,0x22,0x68,0xDC,0x4C,0xD6,0xB1,0x7E,0xE8,0x88,0x1F,0xC5,0x7E,0xA0,0x46,0x79};
+
+ TokenRange() : start_token(""), end_token("") {
+ }
+
+ virtual ~TokenRange() throw() {}
+
+ std::string start_token;
+ std::string end_token;
+ std::vector<std::string> endpoints;
+ std::vector<std::string> rpc_endpoints;
+ std::vector<EndpointDetails> endpoint_details;
+
+ _TokenRange__isset __isset;
+
+ void __set_start_token(const std::string& val) {
+ start_token = val;
+ }
+
+ void __set_end_token(const std::string& val) {
+ end_token = val;
+ }
+
+ void __set_endpoints(const std::vector<std::string> & val) {
+ endpoints = val;
+ }
+
+ void __set_rpc_endpoints(const std::vector<std::string> & val) {
+ rpc_endpoints = val;
+ __isset.rpc_endpoints = true;
+ }
+
+ void __set_endpoint_details(const std::vector<EndpointDetails> & val) {
+ endpoint_details = val;
+ __isset.endpoint_details = true;
+ }
+
+ bool operator == (const TokenRange & rhs) const
+ {
+ if (!(start_token == rhs.start_token))
+ return false;
+ if (!(end_token == rhs.end_token))
+ return false;
+ if (!(endpoints == rhs.endpoints))
+ return false;
+ if (__isset.rpc_endpoints != rhs.__isset.rpc_endpoints)
+ return false;
+ else if (__isset.rpc_endpoints && !(rpc_endpoints == rhs.rpc_endpoints))
+ return false;
+ if (__isset.endpoint_details != rhs.__isset.endpoint_details)
+ return false;
+ else if (__isset.endpoint_details && !(endpoint_details == rhs.endpoint_details))
+ return false;
+ return true;
+ }
+ bool operator != (const TokenRange &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const TokenRange & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class AuthenticationRequest {
+ public:
+
+ static const char* ascii_fingerprint; // = "5EA2D527ECA3BA20C77AFC023EE8C05F";
+ static const uint8_t binary_fingerprint[16]; // = {0x5E,0xA2,0xD5,0x27,0xEC,0xA3,0xBA,0x20,0xC7,0x7A,0xFC,0x02,0x3E,0xE8,0xC0,0x5F};
+
+ AuthenticationRequest() {
+ }
+
+ virtual ~AuthenticationRequest() throw() {}
+
+ std::map<std::string, std::string> credentials;
+
+ void __set_credentials(const std::map<std::string, std::string> & val) {
+ credentials = val;
+ }
+
+ bool operator == (const AuthenticationRequest & rhs) const
+ {
+ if (!(credentials == rhs.credentials))
+ return false;
+ return true;
+ }
+ bool operator != (const AuthenticationRequest &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const AuthenticationRequest & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ColumnDef__isset {
+ _ColumnDef__isset() : index_type(false), index_name(false), index_options(false) {}
+ bool index_type;
+ bool index_name;
+ bool index_options;
+} _ColumnDef__isset;
+
+class ColumnDef {
+ public:
+
+ static const char* ascii_fingerprint; // = "0D89CE83D7EDAD079AC3213ED1DCAA58";
+ static const uint8_t binary_fingerprint[16]; // = {0x0D,0x89,0xCE,0x83,0xD7,0xED,0xAD,0x07,0x9A,0xC3,0x21,0x3E,0xD1,0xDC,0xAA,0x58};
+
+ ColumnDef() : name(""), validation_class(""), index_type((IndexType::type)0), index_name("") {
+ }
+
+ virtual ~ColumnDef() throw() {}
+
+ std::string name;
+ std::string validation_class;
+ IndexType::type index_type;
+ std::string index_name;
+ std::map<std::string, std::string> index_options;
+
+ _ColumnDef__isset __isset;
+
+ void __set_name(const std::string& val) {
+ name = val;
+ }
+
+ void __set_validation_class(const std::string& val) {
+ validation_class = val;
+ }
+
+ void __set_index_type(const IndexType::type val) {
+ index_type = val;
+ __isset.index_type = true;
+ }
+
+ void __set_index_name(const std::string& val) {
+ index_name = val;
+ __isset.index_name = true;
+ }
+
+ void __set_index_options(const std::map<std::string, std::string> & val) {
+ index_options = val;
+ __isset.index_options = true;
+ }
+
+ bool operator == (const ColumnDef & rhs) const
+ {
+ if (!(name == rhs.name))
+ return false;
+ if (!(validation_class == rhs.validation_class))
+ return false;
+ if (__isset.index_type != rhs.__isset.index_type)
+ return false;
+ else if (__isset.index_type && !(index_type == rhs.index_type))
+ return false;
+ if (__isset.index_name != rhs.__isset.index_name)
+ return false;
+ else if (__isset.index_name && !(index_name == rhs.index_name))
+ return false;
+ if (__isset.index_options != rhs.__isset.index_options)
+ return false;
+ else if (__isset.index_options && !(index_options == rhs.index_options))
+ return false;
+ return true;
+ }
+ bool operator != (const ColumnDef &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const ColumnDef & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _CfDef__isset {
+ _CfDef__isset() : column_type(false), comparator_type(false), subcomparator_type(false), comment(false), read_repair_chance(false), column_metadata(false), gc_grace_seconds(false), default_validation_class(false), id(false), min_compaction_threshold(false), max_compaction_threshold(false), replicate_on_write(false), key_validation_class(false), key_alias(false), compaction_strategy(false), compaction_strategy_options(false), compression_options(false), bloom_filter_fp_chance(false), caching(false), dclocal_read_repair_chance(false), row_cache_size(false), key_cache_size(false), row_cache_save_period_in_seconds(false), key_cache_save_period_in_seconds(false), memtable_flush_after_mins(false), memtable_throughput_in_mb(false), memtable_operations_in_millions(false), merge_shards_chance(false), row_cache_provider(false), row_cache_keys_to_save(false) {}
+ bool column_type;
+ bool comparator_type;
+ bool subcomparator_type;
+ bool comment;
+ bool read_repair_chance;
+ bool column_metadata;
+ bool gc_grace_seconds;
+ bool default_validation_class;
+ bool id;
+ bool min_compaction_threshold;
+ bool max_compaction_threshold;
+ bool replicate_on_write;
+ bool key_validation_class;
+ bool key_alias;
+ bool compaction_strategy;
+ bool compaction_strategy_options;
+ bool compression_options;
+ bool bloom_filter_fp_chance;
+ bool caching;
+ bool dclocal_read_repair_chance;
+ bool row_cache_size;
+ bool key_cache_size;
+ bool row_cache_save_period_in_seconds;
+ bool key_cache_save_period_in_seconds;
+ bool memtable_flush_after_mins;
+ bool memtable_throughput_in_mb;
+ bool memtable_operations_in_millions;
+ bool merge_shards_chance;
+ bool row_cache_provider;
+ bool row_cache_keys_to_save;
+} _CfDef__isset;
+
+class CfDef {
+ public:
+
+ static const char* ascii_fingerprint; // = "231A260521B5DD99EFBCCBDD8768CA7D";
+ static const uint8_t binary_fingerprint[16]; // = {0x23,0x1A,0x26,0x05,0x21,0xB5,0xDD,0x99,0xEF,0xBC,0xCB,0xDD,0x87,0x68,0xCA,0x7D};
+
+ CfDef() : keyspace(""), name(""), column_type("Standard"), comparator_type("BytesType"), subcomparator_type(""), comment(""), read_repair_chance(0), gc_grace_seconds(0), default_validation_class(""), id(0), min_compaction_threshold(0), max_compaction_threshold(0), replicate_on_write(0), key_validation_class(""), key_alias(""), compaction_strategy(""), bloom_filter_fp_chance(0), caching("keys_only"), dclocal_read_repair_chance(0), row_cache_size(0), key_cache_size(0), row_cache_save_period_in_seconds(0), key_cache_save_period_in_seconds(0), memtable_flush_after_mins(0), memtable_throughput_in_mb(0), memtable_operations_in_millions(0), merge_shards_chance(0), row_cache_provider(""), row_cache_keys_to_save(0) {
+ }
+
+ virtual ~CfDef() throw() {}
+
+ std::string keyspace;
+ std::string name;
+ std::string column_type;
+ std::string comparator_type;
+ std::string subcomparator_type;
+ std::string comment;
+ double read_repair_chance;
+ std::vector<ColumnDef> column_metadata;
+ int32_t gc_grace_seconds;
+ std::string default_validation_class;
+ int32_t id;
+ int32_t min_compaction_threshold;
+ int32_t max_compaction_threshold;
+ bool replicate_on_write;
+ std::string key_validation_class;
+ std::string key_alias;
+ std::string compaction_strategy;
+ std::map<std::string, std::string> compaction_strategy_options;
+ std::map<std::string, std::string> compression_options;
+ double bloom_filter_fp_chance;
+ std::string caching;
+ double dclocal_read_repair_chance;
+ double row_cache_size;
+ double key_cache_size;
+ int32_t row_cache_save_period_in_seconds;
+ int32_t key_cache_save_period_in_seconds;
+ int32_t memtable_flush_after_mins;
+ int32_t memtable_throughput_in_mb;
+ double memtable_operations_in_millions;
+ double merge_shards_chance;
+ std::string row_cache_provider;
+ int32_t row_cache_keys_to_save;
+
+ _CfDef__isset __isset;
+
+ void __set_keyspace(const std::string& val) {
+ keyspace = val;
+ }
+
+ void __set_name(const std::string& val) {
+ name = val;
+ }
+
+ void __set_column_type(const std::string& val) {
+ column_type = val;
+ __isset.column_type = true;
+ }
+
+ void __set_comparator_type(const std::string& val) {
+ comparator_type = val;
+ __isset.comparator_type = true;
+ }
+
+ void __set_subcomparator_type(const std::string& val) {
+ subcomparator_type = val;
+ __isset.subcomparator_type = true;
+ }
+
+ void __set_comment(const std::string& val) {
+ comment = val;
+ __isset.comment = true;
+ }
+
+ void __set_read_repair_chance(const double val) {
+ read_repair_chance = val;
+ __isset.read_repair_chance = true;
+ }
+
+ void __set_column_metadata(const std::vector<ColumnDef> & val) {
+ column_metadata = val;
+ __isset.column_metadata = true;
+ }
+
+ void __set_gc_grace_seconds(const int32_t val) {
+ gc_grace_seconds = val;
+ __isset.gc_grace_seconds = true;
+ }
+
+ void __set_default_validation_class(const std::string& val) {
+ default_validation_class = val;
+ __isset.default_validation_class = true;
+ }
+
+ void __set_id(const int32_t val) {
+ id = val;
+ __isset.id = true;
+ }
+
+ void __set_min_compaction_threshold(const int32_t val) {
+ min_compaction_threshold = val;
+ __isset.min_compaction_threshold = true;
+ }
+
+ void __set_max_compaction_threshold(const int32_t val) {
+ max_compaction_threshold = val;
+ __isset.max_compaction_threshold = true;
+ }
+
+ void __set_replicate_on_write(const bool val) {
+ replicate_on_write = val;
+ __isset.replicate_on_write = true;
+ }
+
+ void __set_key_validation_class(const std::string& val) {
+ key_validation_class = val;
+ __isset.key_validation_class = true;
+ }
+
+ void __set_key_alias(const std::string& val) {
+ key_alias = val;
+ __isset.key_alias = true;
+ }
+
+ void __set_compaction_strategy(const std::string& val) {
+ compaction_strategy = val;
+ __isset.compaction_strategy = true;
+ }
+
+ void __set_compaction_strategy_options(const std::map<std::string, std::string> & val) {
+ compaction_strategy_options = val;
+ __isset.compaction_strategy_options = true;
+ }
+
+ void __set_compression_options(const std::map<std::string, std::string> & val) {
+ compression_options = val;
+ __isset.compression_options = true;
+ }
+
+ void __set_bloom_filter_fp_chance(const double val) {
+ bloom_filter_fp_chance = val;
+ __isset.bloom_filter_fp_chance = true;
+ }
+
+ void __set_caching(const std::string& val) {
+ caching = val;
+ __isset.caching = true;
+ }
+
+ void __set_dclocal_read_repair_chance(const double val) {
+ dclocal_read_repair_chance = val;
+ __isset.dclocal_read_repair_chance = true;
+ }
+
+ void __set_row_cache_size(const double val) {
+ row_cache_size = val;
+ __isset.row_cache_size = true;
+ }
+
+ void __set_key_cache_size(const double val) {
+ key_cache_size = val;
+ __isset.key_cache_size = true;
+ }
+
+ void __set_row_cache_save_period_in_seconds(const int32_t val) {
+ row_cache_save_period_in_seconds = val;
+ __isset.row_cache_save_period_in_seconds = true;
+ }
+
+ void __set_key_cache_save_period_in_seconds(const int32_t val) {
+ key_cache_save_period_in_seconds = val;
+ __isset.key_cache_save_period_in_seconds = true;
+ }
+
+ void __set_memtable_flush_after_mins(const int32_t val) {
+ memtable_flush_after_mins = val;
+ __isset.memtable_flush_after_mins = true;
+ }
+
+ void __set_memtable_throughput_in_mb(const int32_t val) {
+ memtable_throughput_in_mb = val;
+ __isset.memtable_throughput_in_mb = true;
+ }
+
+ void __set_memtable_operations_in_millions(const double val) {
+ memtable_operations_in_millions = val;
+ __isset.memtable_operations_in_millions = true;
+ }
+
+ void __set_merge_shards_chance(const double val) {
+ merge_shards_chance = val;
+ __isset.merge_shards_chance = true;
+ }
+
+ void __set_row_cache_provider(const std::string& val) {
+ row_cache_provider = val;
+ __isset.row_cache_provider = true;
+ }
+
+ void __set_row_cache_keys_to_save(const int32_t val) {
+ row_cache_keys_to_save = val;
+ __isset.row_cache_keys_to_save = true;
+ }
+
+ bool operator == (const CfDef & rhs) const
+ {
+ if (!(keyspace == rhs.keyspace))
+ return false;
+ if (!(name == rhs.name))
+ return false;
+ if (__isset.column_type != rhs.__isset.column_type)
+ return false;
+ else if (__isset.column_type && !(column_type == rhs.column_type))
+ return false;
+ if (__isset.comparator_type != rhs.__isset.comparator_type)
+ return false;
+ else if (__isset.comparator_type && !(comparator_type == rhs.comparator_type))
+ return false;
+ if (__isset.subcomparator_type != rhs.__isset.subcomparator_type)
+ return false;
+ else if (__isset.subcomparator_type && !(subcomparator_type == rhs.subcomparator_type))
+ return false;
+ if (__isset.comment != rhs.__isset.comment)
+ return false;
+ else if (__isset.comment && !(comment == rhs.comment))
+ return false;
+ if (__isset.read_repair_chance != rhs.__isset.read_repair_chance)
+ return false;
+ else if (__isset.read_repair_chance && !(read_repair_chance == rhs.read_repair_chance))
+ return false;
+ if (__isset.column_metadata != rhs.__isset.column_metadata)
+ return false;
+ else if (__isset.column_metadata && !(column_metadata == rhs.column_metadata))
+ return false;
+ if (__isset.gc_grace_seconds != rhs.__isset.gc_grace_seconds)
+ return false;
+ else if (__isset.gc_grace_seconds && !(gc_grace_seconds == rhs.gc_grace_seconds))
+ return false;
+ if (__isset.default_validation_class != rhs.__isset.default_validation_class)
+ return false;
+ else if (__isset.default_validation_class && !(default_validation_class == rhs.default_validation_class))
+ return false;
+ if (__isset.id != rhs.__isset.id)
+ return false;
+ else if (__isset.id && !(id == rhs.id))
+ return false;
+ if (__isset.min_compaction_threshold != rhs.__isset.min_compaction_threshold)
+ return false;
+ else if (__isset.min_compaction_threshold && !(min_compaction_threshold == rhs.min_compaction_threshold))
+ return false;
+ if (__isset.max_compaction_threshold != rhs.__isset.max_compaction_threshold)
+ return false;
+ else if (__isset.max_compaction_threshold && !(max_compaction_threshold == rhs.max_compaction_threshold))
+ return false;
+ if (__isset.replicate_on_write != rhs.__isset.replicate_on_write)
+ return false;
+ else if (__isset.replicate_on_write && !(replicate_on_write == rhs.replicate_on_write))
+ return false;
+ if (__isset.key_validation_class != rhs.__isset.key_validation_class)
+ return false;
+ else if (__isset.key_validation_class && !(key_validation_class == rhs.key_validation_class))
+ return false;
+ if (__isset.key_alias != rhs.__isset.key_alias)
+ return false;
+ else if (__isset.key_alias && !(key_alias == rhs.key_alias))
+ return false;
+ if (__isset.compaction_strategy != rhs.__isset.compaction_strategy)
+ return false;
+ else if (__isset.compaction_strategy && !(compaction_strategy == rhs.compaction_strategy))
+ return false;
+ if (__isset.compaction_strategy_options != rhs.__isset.compaction_strategy_options)
+ return false;
+ else if (__isset.compaction_strategy_options && !(compaction_strategy_options == rhs.compaction_strategy_options))
+ return false;
+ if (__isset.compression_options != rhs.__isset.compression_options)
+ return false;
+ else if (__isset.compression_options && !(compression_options == rhs.compression_options))
+ return false;
+ if (__isset.bloom_filter_fp_chance != rhs.__isset.bloom_filter_fp_chance)
+ return false;
+ else if (__isset.bloom_filter_fp_chance && !(bloom_filter_fp_chance == rhs.bloom_filter_fp_chance))
+ return false;
+ if (__isset.caching != rhs.__isset.caching)
+ return false;
+ else if (__isset.caching && !(caching == rhs.caching))
+ return false;
+ if (__isset.dclocal_read_repair_chance != rhs.__isset.dclocal_read_repair_chance)
+ return false;
+ else if (__isset.dclocal_read_repair_chance && !(dclocal_read_repair_chance == rhs.dclocal_read_repair_chance))
+ return false;
+ if (__isset.row_cache_size != rhs.__isset.row_cache_size)
+ return false;
+ else if (__isset.row_cache_size && !(row_cache_size == rhs.row_cache_size))
+ return false;
+ if (__isset.key_cache_size != rhs.__isset.key_cache_size)
+ return false;
+ else if (__isset.key_cache_size && !(key_cache_size == rhs.key_cache_size))
+ return false;
+ if (__isset.row_cache_save_period_in_seconds != rhs.__isset.row_cache_save_period_in_seconds)
+ return false;
+ else if (__isset.row_cache_save_period_in_seconds && !(row_cache_save_period_in_seconds == rhs.row_cache_save_period_in_seconds))
+ return false;
+ if (__isset.key_cache_save_period_in_seconds != rhs.__isset.key_cache_save_period_in_seconds)
+ return false;
+ else if (__isset.key_cache_save_period_in_seconds && !(key_cache_save_period_in_seconds == rhs.key_cache_save_period_in_seconds))
+ return false;
+ if (__isset.memtable_flush_after_mins != rhs.__isset.memtable_flush_after_mins)
+ return false;
+ else if (__isset.memtable_flush_after_mins && !(memtable_flush_after_mins == rhs.memtable_flush_after_mins))
+ return false;
+ if (__isset.memtable_throughput_in_mb != rhs.__isset.memtable_throughput_in_mb)
+ return false;
+ else if (__isset.memtable_throughput_in_mb && !(memtable_throughput_in_mb == rhs.memtable_throughput_in_mb))
+ return false;
+ if (__isset.memtable_operations_in_millions != rhs.__isset.memtable_operations_in_millions)
+ return false;
+ else if (__isset.memtable_operations_in_millions && !(memtable_operations_in_millions == rhs.memtable_operations_in_millions))
+ return false;
+ if (__isset.merge_shards_chance != rhs.__isset.merge_shards_chance)
+ return false;
+ else if (__isset.merge_shards_chance && !(merge_shards_chance == rhs.merge_shards_chance))
+ return false;
+ if (__isset.row_cache_provider != rhs.__isset.row_cache_provider)
+ return false;
+ else if (__isset.row_cache_provider && !(row_cache_provider == rhs.row_cache_provider))
+ return false;
+ if (__isset.row_cache_keys_to_save != rhs.__isset.row_cache_keys_to_save)
+ return false;
+ else if (__isset.row_cache_keys_to_save && !(row_cache_keys_to_save == rhs.row_cache_keys_to_save))
+ return false;
+ return true;
+ }
+ bool operator != (const CfDef &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const CfDef & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _KsDef__isset {
+ _KsDef__isset() : strategy_options(false), replication_factor(false), durable_writes(false) {}
+ bool strategy_options;
+ bool replication_factor;
+ bool durable_writes;
+} _KsDef__isset;
+
+class KsDef {
+ public:
+
+ static const char* ascii_fingerprint; // = "0767851B6476EB3777A21E59E912E11A";
+ static const uint8_t binary_fingerprint[16]; // = {0x07,0x67,0x85,0x1B,0x64,0x76,0xEB,0x37,0x77,0xA2,0x1E,0x59,0xE9,0x12,0xE1,0x1A};
+
+ KsDef() : name(""), strategy_class(""), replication_factor(0), durable_writes(true) {
+ }
+
+ virtual ~KsDef() throw() {}
+
+ std::string name;
+ std::string strategy_class;
+ std::map<std::string, std::string> strategy_options;
+ int32_t replication_factor;
+ std::vector<CfDef> cf_defs;
+ bool durable_writes;
+
+ _KsDef__isset __isset;
+
+ void __set_name(const std::string& val) {
+ name = val;
+ }
+
+ void __set_strategy_class(const std::string& val) {
+ strategy_class = val;
+ }
+
+ void __set_strategy_options(const std::map<std::string, std::string> & val) {
+ strategy_options = val;
+ __isset.strategy_options = true;
+ }
+
+ void __set_replication_factor(const int32_t val) {
+ replication_factor = val;
+ __isset.replication_factor = true;
+ }
+
+ void __set_cf_defs(const std::vector<CfDef> & val) {
+ cf_defs = val;
+ }
+
+ void __set_durable_writes(const bool val) {
+ durable_writes = val;
+ __isset.durable_writes = true;
+ }
+
+ bool operator == (const KsDef & rhs) const
+ {
+ if (!(name == rhs.name))
+ return false;
+ if (!(strategy_class == rhs.strategy_class))
+ return false;
+ if (__isset.strategy_options != rhs.__isset.strategy_options)
+ return false;
+ else if (__isset.strategy_options && !(strategy_options == rhs.strategy_options))
+ return false;
+ if (__isset.replication_factor != rhs.__isset.replication_factor)
+ return false;
+ else if (__isset.replication_factor && !(replication_factor == rhs.replication_factor))
+ return false;
+ if (!(cf_defs == rhs.cf_defs))
+ return false;
+ if (__isset.durable_writes != rhs.__isset.durable_writes)
+ return false;
+ else if (__isset.durable_writes && !(durable_writes == rhs.durable_writes))
+ return false;
+ return true;
+ }
+ bool operator != (const KsDef &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const KsDef & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class CqlRow {
+ public:
+
+ static const char* ascii_fingerprint; // = "470EFC558004E98D92D604898305C04E";
+ static const uint8_t binary_fingerprint[16]; // = {0x47,0x0E,0xFC,0x55,0x80,0x04,0xE9,0x8D,0x92,0xD6,0x04,0x89,0x83,0x05,0xC0,0x4E};
+
+ CqlRow() : key("") {
+ }
+
+ virtual ~CqlRow() throw() {}
+
+ std::string key;
+ std::vector<Column> columns;
+
+ void __set_key(const std::string& val) {
+ key = val;
+ }
+
+ void __set_columns(const std::vector<Column> & val) {
+ columns = val;
+ }
+
+ bool operator == (const CqlRow & rhs) const
+ {
+ if (!(key == rhs.key))
+ return false;
+ if (!(columns == rhs.columns))
+ return false;
+ return true;
+ }
+ bool operator != (const CqlRow &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const CqlRow & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class CqlMetadata {
+ public:
+
+ static const char* ascii_fingerprint; // = "B7C5A4AA9652C744A48EBC1C12D531E7";
+ static const uint8_t binary_fingerprint[16]; // = {0xB7,0xC5,0xA4,0xAA,0x96,0x52,0xC7,0x44,0xA4,0x8E,0xBC,0x1C,0x12,0xD5,0x31,0xE7};
+
+ CqlMetadata() : default_name_type(""), default_value_type("") {
+ }
+
+ virtual ~CqlMetadata() throw() {}
+
+ std::map<std::string, std::string> name_types;
+ std::map<std::string, std::string> value_types;
+ std::string default_name_type;
+ std::string default_value_type;
+
+ void __set_name_types(const std::map<std::string, std::string> & val) {
+ name_types = val;
+ }
+
+ void __set_value_types(const std::map<std::string, std::string> & val) {
+ value_types = val;
+ }
+
+ void __set_default_name_type(const std::string& val) {
+ default_name_type = val;
+ }
+
+ void __set_default_value_type(const std::string& val) {
+ default_value_type = val;
+ }
+
+ bool operator == (const CqlMetadata & rhs) const
+ {
+ if (!(name_types == rhs.name_types))
+ return false;
+ if (!(value_types == rhs.value_types))
+ return false;
+ if (!(default_name_type == rhs.default_name_type))
+ return false;
+ if (!(default_value_type == rhs.default_value_type))
+ return false;
+ return true;
+ }
+ bool operator != (const CqlMetadata &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const CqlMetadata & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _CqlResult__isset {
+ _CqlResult__isset() : rows(false), num(false), schema(false) {}
+ bool rows;
+ bool num;
+ bool schema;
+} _CqlResult__isset;
+
+class CqlResult {
+ public:
+
+ static const char* ascii_fingerprint; // = "521B9CE5AF77539F7267F6952B609E81";
+ static const uint8_t binary_fingerprint[16]; // = {0x52,0x1B,0x9C,0xE5,0xAF,0x77,0x53,0x9F,0x72,0x67,0xF6,0x95,0x2B,0x60,0x9E,0x81};
+
+ CqlResult() : type((CqlResultType::type)0), num(0) {
+ }
+
+ virtual ~CqlResult() throw() {}
+
+ CqlResultType::type type;
+ std::vector<CqlRow> rows;
+ int32_t num;
+ CqlMetadata schema;
+
+ _CqlResult__isset __isset;
+
+ void __set_type(const CqlResultType::type val) {
+ type = val;
+ }
+
+ void __set_rows(const std::vector<CqlRow> & val) {
+ rows = val;
+ __isset.rows = true;
+ }
+
+ void __set_num(const int32_t val) {
+ num = val;
+ __isset.num = true;
+ }
+
+ void __set_schema(const CqlMetadata& val) {
+ schema = val;
+ __isset.schema = true;
+ }
+
+ bool operator == (const CqlResult & rhs) const
+ {
+ if (!(type == rhs.type))
+ return false;
+ if (__isset.rows != rhs.__isset.rows)
+ return false;
+ else if (__isset.rows && !(rows == rhs.rows))
+ return false;
+ if (__isset.num != rhs.__isset.num)
+ return false;
+ else if (__isset.num && !(num == rhs.num))
+ return false;
+ if (__isset.schema != rhs.__isset.schema)
+ return false;
+ else if (__isset.schema && !(schema == rhs.schema))
+ return false;
+ return true;
+ }
+ bool operator != (const CqlResult &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const CqlResult & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _CqlPreparedResult__isset {
+ _CqlPreparedResult__isset() : variable_types(false), variable_names(false) {}
+ bool variable_types;
+ bool variable_names;
+} _CqlPreparedResult__isset;
+
+class CqlPreparedResult {
+ public:
+
+ static const char* ascii_fingerprint; // = "7E1663EC688DFDC28722BF36F9F64E6F";
+ static const uint8_t binary_fingerprint[16]; // = {0x7E,0x16,0x63,0xEC,0x68,0x8D,0xFD,0xC2,0x87,0x22,0xBF,0x36,0xF9,0xF6,0x4E,0x6F};
+
+ CqlPreparedResult() : itemId(0), count(0) {
+ }
+
+ virtual ~CqlPreparedResult() throw() {}
+
+ int32_t itemId;
+ int32_t count;
+ std::vector<std::string> variable_types;
+ std::vector<std::string> variable_names;
+
+ _CqlPreparedResult__isset __isset;
+
+ void __set_itemId(const int32_t val) {
+ itemId = val;
+ }
+
+ void __set_count(const int32_t val) {
+ count = val;
+ }
+
+ void __set_variable_types(const std::vector<std::string> & val) {
+ variable_types = val;
+ __isset.variable_types = true;
+ }
+
+ void __set_variable_names(const std::vector<std::string> & val) {
+ variable_names = val;
+ __isset.variable_names = true;
+ }
+
+ bool operator == (const CqlPreparedResult & rhs) const
+ {
+ if (!(itemId == rhs.itemId))
+ return false;
+ if (!(count == rhs.count))
+ return false;
+ if (__isset.variable_types != rhs.__isset.variable_types)
+ return false;
+ else if (__isset.variable_types && !(variable_types == rhs.variable_types))
+ return false;
+ if (__isset.variable_names != rhs.__isset.variable_names)
+ return false;
+ else if (__isset.variable_names && !(variable_names == rhs.variable_names))
+ return false;
+ return true;
+ }
+ bool operator != (const CqlPreparedResult &rhs) const {
+ return !(*this == rhs);
+ }
+
+ bool operator < (const CqlPreparedResult & ) const;
+
+ uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+ uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+}}} // namespace
+
+#endif
diff --git a/storage/cassandra/ha_cassandra.cc b/storage/cassandra/ha_cassandra.cc
new file mode 100644
index 00000000000..3934f288085
--- /dev/null
+++ b/storage/cassandra/ha_cassandra.cc
@@ -0,0 +1,2619 @@
+/*
+ Copyright (c) 2012, Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation // gcc: Class implementation
+#endif
+
+#include <mysql/plugin.h>
+#include "ha_cassandra.h"
+#include "sql_class.h"
+
+#define DYNCOL_USUAL 20
+#define DYNCOL_DELTA 100
+#define DYNCOL_USUAL_REC 1024
+#define DYNCOL_DELTA_REC 1024
+
+static handler *cassandra_create_handler(handlerton *hton,
+ TABLE_SHARE *table,
+ MEM_ROOT *mem_root);
+
+extern int dynamic_column_error_message(enum_dyncol_func_result rc);
+
+handlerton *cassandra_hton;
+
+
+/*
+  Hash used to track open tables; used by the share-management
+  functions below
+*/
+static HASH cassandra_open_tables;
+
+/* Mutex protecting cassandra_open_tables; used by the share-management functions */
+mysql_mutex_t cassandra_mutex;
+
+
+/**
+ Structure for CREATE TABLE options (table options).
+ It needs to be called ha_table_option_struct.
+
+ The option values can be specified in the CREATE TABLE at the end:
+ CREATE TABLE ( ... ) *here*
+*/
+
+struct ha_table_option_struct
+{
+ const char *thrift_host;
+ int thrift_port;
+ const char *keyspace;
+ const char *column_family;
+};
+
+
+ha_create_table_option cassandra_table_option_list[]=
+{
+ /*
+ one option that takes an arbitrary string
+ */
+ HA_TOPTION_STRING("thrift_host", thrift_host),
+ HA_TOPTION_NUMBER("thrift_port", thrift_port, 9160, 1, 65535, 0),
+ HA_TOPTION_STRING("keyspace", keyspace),
+ HA_TOPTION_STRING("column_family", column_family),
+ HA_TOPTION_END
+};
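+
+/*
+  Illustrative example (not part of this patch): with the options above,
+  a Cassandra-backed table could be created as
+
+    CREATE TABLE t (rowkey BIGINT PRIMARY KEY, data BLOB)
+    ENGINE=CASSANDRA thrift_host='127.0.0.1'
+    keyspace='ks' column_family='cf';
+
+  thrift_port defaults to 9160 when not specified.
+*/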
+
+/**
+ Structure for CREATE TABLE options (field options).
+*/
+
+struct ha_field_option_struct
+{
+ bool dyncol_field;
+};
+
+ha_create_table_option cassandra_field_option_list[]=
+{
+ /*
+ Collect all other columns as dynamic here,
+ the valid values are YES/NO, ON/OFF, 1/0.
+ The default is 0, that is false, no, off.
+ */
+ HA_FOPTION_BOOL("DYNAMIC_COLUMN_STORAGE", dyncol_field, 0),
+ HA_FOPTION_END
+};
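+
+/*
+  Illustrative sketch: a single BLOB column can be declared to collect
+  all Cassandra columns that are not mapped to SQL fields, e.g.
+
+    CREATE TABLE t (rowkey CHAR(36) PRIMARY KEY,
+                    dyn BLOB DYNAMIC_COLUMN_STORAGE=yes)
+    ENGINE=CASSANDRA keyspace='ks' column_family='cf';
+*/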
+
+static MYSQL_THDVAR_ULONG(insert_batch_size, PLUGIN_VAR_RQCMDARG,
+ "Number of rows in an INSERT batch",
+ NULL, NULL, /*default*/ 100, /*min*/ 1, /*max*/ 1024*1024*1024, 0);
+
+static MYSQL_THDVAR_ULONG(multiget_batch_size, PLUGIN_VAR_RQCMDARG,
+ "Number of rows in a multiget(MRR) batch",
+ NULL, NULL, /*default*/ 100, /*min*/ 1, /*max*/ 1024*1024*1024, 0);
+
+static MYSQL_THDVAR_ULONG(rnd_batch_size, PLUGIN_VAR_RQCMDARG,
+ "Number of rows in an rnd_read (full scan) batch",
+ NULL, NULL, /*default*/ 10*1000, /*min*/ 1, /*max*/ 1024*1024*1024, 0);
+
+static MYSQL_THDVAR_ULONG(failure_retries, PLUGIN_VAR_RQCMDARG,
+ "Number of times to retry Cassandra calls that failed due to timeouts or "
+ "network communication problems. The default, 0, means not to retry.",
+ NULL, NULL, /*default*/ 0, /*min*/ 0, /*max*/ 1024*1024*1024, 0);
+
+/* These match values in enum_cassandra_consistency_level */
+const char *cassandra_consistency_level[] =
+{
+ "ONE",
+ "QUORUM",
+ "LOCAL_QUORUM",
+ "EACH_QUORUM",
+ "ALL",
+ "ANY",
+ "TWO",
+ "THREE",
+ NullS
+};
+
+TYPELIB cassandra_consistency_level_typelib= {
+ array_elements(cassandra_consistency_level) - 1, "",
+ cassandra_consistency_level, NULL
+};
+
+
+static MYSQL_THDVAR_ENUM(write_consistency, PLUGIN_VAR_RQCMDARG,
+ "Cassandra consistency level to use for write operations", NULL, NULL,
+ ONE, &cassandra_consistency_level_typelib);
+
+static MYSQL_THDVAR_ENUM(read_consistency, PLUGIN_VAR_RQCMDARG,
+ "Cassandra consistency level to use for read operations", NULL, NULL,
+ ONE, &cassandra_consistency_level_typelib);
+
+
+mysql_mutex_t cassandra_default_host_lock;
+static char* cassandra_default_thrift_host = NULL;
+static char cassandra_default_host_buf[256]="";
+
+static void
+cassandra_default_thrift_host_update(THD *thd,
+ struct st_mysql_sys_var* var,
+ void* var_ptr, /*!< out: where the
+ formal string goes */
+ const void* save) /*!< in: immediate result
+ from check function */
+{
+ const char *new_host= *((char**)save);
+ const size_t max_len= sizeof(cassandra_default_host_buf);
+
+ mysql_mutex_lock(&cassandra_default_host_lock);
+
+ if (new_host)
+ {
+ strncpy(cassandra_default_host_buf, new_host, max_len-1);
+ cassandra_default_host_buf[max_len-1]= 0;
+ cassandra_default_thrift_host= cassandra_default_host_buf;
+ }
+ else
+ {
+ cassandra_default_host_buf[0]= 0;
+ cassandra_default_thrift_host= NULL;
+ }
+
+ *((const char**)var_ptr)= cassandra_default_thrift_host;
+
+ mysql_mutex_unlock(&cassandra_default_host_lock);
+}
+
+
+static MYSQL_SYSVAR_STR(default_thrift_host, cassandra_default_thrift_host,
+ PLUGIN_VAR_RQCMDARG,
+ "Default host for Cassandra thrift connections",
+ /*check*/NULL,
+ cassandra_default_thrift_host_update,
+ /*default*/NULL);
+
+static struct st_mysql_sys_var* cassandra_system_variables[]= {
+ MYSQL_SYSVAR(insert_batch_size),
+ MYSQL_SYSVAR(multiget_batch_size),
+ MYSQL_SYSVAR(rnd_batch_size),
+
+ MYSQL_SYSVAR(default_thrift_host),
+ MYSQL_SYSVAR(write_consistency),
+ MYSQL_SYSVAR(read_consistency),
+ MYSQL_SYSVAR(failure_retries),
+ NULL
+};
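+
+/*
+  The declarations above become server variables prefixed with the
+  plugin name. For illustration:
+
+    SET GLOBAL cassandra_default_thrift_host='192.168.0.10';
+    SET cassandra_insert_batch_size=500;
+*/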
+
+Cassandra_status_vars cassandra_counters;
+
+/**
+ @brief
+  Key-extraction callback used when creating the open-tables hash.
+*/
+
+static uchar* cassandra_get_key(CASSANDRA_SHARE *share, size_t *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length=share->table_name_length;
+ return (uchar*) share->table_name;
+}
+
+#ifdef HAVE_PSI_INTERFACE
+static PSI_mutex_key ex_key_mutex_example, ex_key_mutex_CASSANDRA_SHARE_mutex;
+
+static PSI_mutex_info all_cassandra_mutexes[]=
+{
+ { &ex_key_mutex_example, "cassandra", PSI_FLAG_GLOBAL},
+ { &ex_key_mutex_CASSANDRA_SHARE_mutex, "CASSANDRA_SHARE::mutex", 0}
+};
+
+static void init_cassandra_psi_keys()
+{
+ const char* category= "cassandra";
+ int count;
+
+ if (PSI_server == NULL)
+ return;
+
+ count= array_elements(all_cassandra_mutexes);
+ PSI_server->register_mutex(category, all_cassandra_mutexes, count);
+}
+#endif
+
+static int cassandra_init_func(void *p)
+{
+ DBUG_ENTER("cassandra_init_func");
+
+#ifdef HAVE_PSI_INTERFACE
+ init_cassandra_psi_keys();
+#endif
+
+ cassandra_hton= (handlerton *)p;
+ mysql_mutex_init(ex_key_mutex_example, &cassandra_mutex, MY_MUTEX_INIT_FAST);
+ (void) my_hash_init(&cassandra_open_tables,system_charset_info,32,0,0,
+ (my_hash_get_key) cassandra_get_key,0,0);
+
+ cassandra_hton->state= SHOW_OPTION_YES;
+ cassandra_hton->create= cassandra_create_handler;
+ /*
+ Don't specify HTON_CAN_RECREATE in flags. re-create is used by TRUNCATE
+ TABLE to create an *empty* table from scratch. Cassandra table won't be
+ emptied if re-created.
+ */
+ cassandra_hton->flags= 0;
+ cassandra_hton->table_options= cassandra_table_option_list;
+ cassandra_hton->field_options= cassandra_field_option_list;
+
+ mysql_mutex_init(0 /* no instrumentation */,
+ &cassandra_default_host_lock, MY_MUTEX_INIT_FAST);
+
+ DBUG_RETURN(0);
+}
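+
+/*
+  For illustration: once built, the engine is typically loaded with
+
+    INSTALL SONAME 'ha_cassandra';
+
+  which invokes cassandra_init_func() above.
+*/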
+
+
+static int cassandra_done_func(void *p)
+{
+ int error= 0;
+ DBUG_ENTER("cassandra_done_func");
+ if (cassandra_open_tables.records)
+ error= 1;
+ my_hash_free(&cassandra_open_tables);
+ mysql_mutex_destroy(&cassandra_mutex);
+ mysql_mutex_destroy(&cassandra_default_host_lock);
+ DBUG_RETURN(error);
+}
+
+
+/**
+ @brief
+  Simple lock controls: look up (or create) the CASSANDRA_SHARE that is
+  passed to each cassandra handler opened for the same table. The share
+  holds the structures used for table locking, so every open table needs
+  one.
+*/
+
+static CASSANDRA_SHARE *get_share(const char *table_name, TABLE *table)
+{
+ CASSANDRA_SHARE *share;
+ uint length;
+ char *tmp_name;
+
+ mysql_mutex_lock(&cassandra_mutex);
+ length=(uint) strlen(table_name);
+
+ if (!(share=(CASSANDRA_SHARE*) my_hash_search(&cassandra_open_tables,
+ (uchar*) table_name,
+ length)))
+ {
+ if (!(share=(CASSANDRA_SHARE *)
+ my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
+ &share, sizeof(*share),
+ &tmp_name, length+1,
+ NullS)))
+ {
+ mysql_mutex_unlock(&cassandra_mutex);
+ return NULL;
+ }
+
+ share->use_count=0;
+ share->table_name_length=length;
+ share->table_name=tmp_name;
+ strmov(share->table_name,table_name);
+ if (my_hash_insert(&cassandra_open_tables, (uchar*) share))
+ goto error;
+ thr_lock_init(&share->lock);
+ mysql_mutex_init(ex_key_mutex_CASSANDRA_SHARE_mutex,
+ &share->mutex, MY_MUTEX_INIT_FAST);
+ }
+ share->use_count++;
+ mysql_mutex_unlock(&cassandra_mutex);
+
+ return share;
+
+error:
+ mysql_mutex_destroy(&share->mutex);
+ my_free(share);
+
+ return NULL;
+}
+
+
+/**
+ @brief
+  Free lock controls. Called whenever a table is closed. If this was the
+  last reference to the share, the memory associated with it is freed.
+*/
+
+static int free_share(CASSANDRA_SHARE *share)
+{
+ mysql_mutex_lock(&cassandra_mutex);
+ if (!--share->use_count)
+ {
+ my_hash_delete(&cassandra_open_tables, (uchar*) share);
+ thr_lock_delete(&share->lock);
+ mysql_mutex_destroy(&share->mutex);
+ my_free(share);
+ }
+ mysql_mutex_unlock(&cassandra_mutex);
+
+ return 0;
+}
+
+
+static handler* cassandra_create_handler(handlerton *hton,
+ TABLE_SHARE *table,
+ MEM_ROOT *mem_root)
+{
+ return new (mem_root) ha_cassandra(hton, table);
+}
+
+
+ha_cassandra::ha_cassandra(handlerton *hton, TABLE_SHARE *table_arg)
+ :handler(hton, table_arg),
+ se(NULL), field_converters(NULL),
+ special_type_field_converters(NULL),
+ special_type_field_names(NULL), n_special_type_fields(0),
+ rowkey_converter(NULL),
+ dyncol_field(0), dyncol_set(0)
+{}
+
+
+static const char *ha_cassandra_exts[] = {
+ NullS
+};
+
+const char **ha_cassandra::bas_ext() const
+{
+ return ha_cassandra_exts;
+}
+
+
+int ha_cassandra::connect_and_check_options(TABLE *table_arg)
+{
+ ha_table_option_struct *options= table_arg->s->option_struct;
+ int res;
+ DBUG_ENTER("ha_cassandra::connect_and_check_options");
+
+ if ((res= check_field_options(table_arg->s->field)) ||
+ (res= check_table_options(options)))
+ DBUG_RETURN(res);
+
+ se= create_cassandra_se();
+ se->set_column_family(options->column_family);
+ const char *thrift_host= options->thrift_host? options->thrift_host:
+ cassandra_default_thrift_host;
+ if (se->connect(thrift_host, options->thrift_port, options->keyspace))
+ {
+ my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), se->error_str());
+ DBUG_RETURN(HA_ERR_NO_CONNECTION);
+ }
+
+ if (setup_field_converters(table_arg->field, table_arg->s->fields))
+ {
+ DBUG_RETURN(HA_ERR_NO_CONNECTION);
+ }
+
+ DBUG_RETURN(0);
+}
+
+
+int ha_cassandra::check_field_options(Field **fields)
+{
+ Field **field;
+ uint i;
+ DBUG_ENTER("ha_cassandra::check_field_options");
+ for (field= fields, i= 0; *field; field++, i++)
+ {
+ ha_field_option_struct *field_options= (*field)->option_struct;
+ if (field_options && field_options->dyncol_field)
+ {
+ if (dyncol_set || (*field)->type() != MYSQL_TYPE_BLOB)
+ {
+ my_error(ER_WRONG_FIELD_SPEC, MYF(0), (*field)->field_name);
+ DBUG_RETURN(HA_WRONG_CREATE_OPTION);
+ }
+ dyncol_set= 1;
+ dyncol_field= i;
+ bzero(&dynamic_values, sizeof(dynamic_values));
+ bzero(&dynamic_names, sizeof(dynamic_names));
+ bzero(&dynamic_rec, sizeof(dynamic_rec));
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+
+int ha_cassandra::open(const char *name, int mode, uint test_if_locked)
+{
+ DBUG_ENTER("ha_cassandra::open");
+
+ if (!(share = get_share(name, table)))
+ DBUG_RETURN(1);
+ thr_lock_data_init(&share->lock,&lock,NULL);
+
+ DBUG_ASSERT(!se);
+ /*
+    Don't connect here: doing so would make SHOW CREATE TABLE fail when
+    the Cassandra server has gone away.
+ */
+ /*
+ int res;
+ if ((res= connect_and_check_options(table)))
+ {
+ DBUG_RETURN(res);
+ }
+ */
+
+ info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
+ insert_lineno= 0;
+
+ DBUG_RETURN(0);
+}
+
+
+int ha_cassandra::close(void)
+{
+ DBUG_ENTER("ha_cassandra::close");
+ delete se;
+ se= NULL;
+ free_field_converters();
+ DBUG_RETURN(free_share(share));
+}
+
+
+int ha_cassandra::check_table_options(ha_table_option_struct *options)
+{
+ if (!options->thrift_host && (!cassandra_default_thrift_host ||
+ !cassandra_default_thrift_host[0]))
+ {
+ my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0),
+ "thrift_host table option must be specified, or "
+ "@@cassandra_default_thrift_host must be set");
+ return HA_WRONG_CREATE_OPTION;
+ }
+
+ if (!options->keyspace || !options->column_family)
+ {
+ my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0),
+ "keyspace and column_family table options must be specified");
+ return HA_WRONG_CREATE_OPTION;
+ }
+ return 0;
+}
+
+
+/**
+ @brief
+  create() is called to create a table. The 'name' argument contains the
+  name of the table.
+
+ @details
+ When create() is called you do not need to worry about
+ opening the table. Also, the .frm file will have already been
+ created so adjusting create_info is not necessary. You can overwrite
+ the .frm file at this point if you wish to change the table
+ definition, but there are no methods currently provided for doing
+ so.
+
+  Called from handler.cc by ha_create_table().
+
+ @see
+  ha_create_table() in handler.cc
+*/
+
+int ha_cassandra::create(const char *name, TABLE *table_arg,
+ HA_CREATE_INFO *create_info)
+{
+ int res;
+ DBUG_ENTER("ha_cassandra::create");
+
+ if (table_arg->s->keys != 1 || table_arg->s->primary_key !=0 ||
+ table_arg->key_info[0].key_parts != 1 ||
+ table_arg->key_info[0].key_part[0].fieldnr != 1)
+ {
+ my_error(ER_WRONG_COLUMN_NAME, MYF(0),
+ "Table must have PRIMARY KEY defined over the first column");
+ DBUG_RETURN(HA_WRONG_CREATE_OPTION);
+ }
+
+ DBUG_ASSERT(!se);
+ if ((res= connect_and_check_options(table_arg)))
+ DBUG_RETURN(res);
+
+ insert_lineno= 0;
+ DBUG_RETURN(0);
+}
+
+/*
+ Mapping needs to
+ - copy value from MySQL record to Thrift buffer
+  - copy value from Thrift buffer to MySQL record.
+
+*/
+
+/* Converter base */
+class ColumnDataConverter
+{
+public:
+ Field *field;
+
+ /* This will save Cassandra's data in the Field */
+ virtual int cassandra_to_mariadb(const char *cass_data,
+ int cass_data_len)=0;
+
+ /*
+ This will get data from the Field pointer, store Cassandra's form
+ in internal buffer, and return pointer/size.
+
+ @return
+ false - OK
+ true - Failed to convert value (completely, there is no value to insert
+ at all).
+ */
+ virtual bool mariadb_to_cassandra(char **cass_data, int *cass_data_len)=0;
+ virtual ~ColumnDataConverter() {};
+};
+
+
+class DoubleDataConverter : public ColumnDataConverter
+{
+ double buf;
+public:
+ int cassandra_to_mariadb(const char *cass_data, int cass_data_len)
+ {
+ DBUG_ASSERT(cass_data_len == sizeof(double));
+ double *pdata= (double*) cass_data;
+ field->store(*pdata);
+ return 0;
+ }
+
+ bool mariadb_to_cassandra(char **cass_data, int *cass_data_len)
+ {
+ buf= field->val_real();
+ *cass_data= (char*)&buf;
+ *cass_data_len=sizeof(double);
+ return false;
+ }
+ ~DoubleDataConverter(){}
+};
+
+
+class FloatDataConverter : public ColumnDataConverter
+{
+ float buf;
+public:
+ int cassandra_to_mariadb(const char *cass_data, int cass_data_len)
+ {
+ DBUG_ASSERT(cass_data_len == sizeof(float));
+ float *pdata= (float*) cass_data;
+ field->store(*pdata);
+ return 0;
+ }
+
+ bool mariadb_to_cassandra(char **cass_data, int *cass_data_len)
+ {
+ buf= field->val_real();
+ *cass_data= (char*)&buf;
+ *cass_data_len=sizeof(float);
+ return false;
+ }
+ ~FloatDataConverter(){}
+};
+
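+/* Reverse the byte order of an 8-byte value (network <-> host order) */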
+static void flip64(const char *from, char* to)
+{
+ to[0]= from[7];
+ to[1]= from[6];
+ to[2]= from[5];
+ to[3]= from[4];
+ to[4]= from[3];
+ to[5]= from[2];
+ to[6]= from[1];
+ to[7]= from[0];
+}
+
+class BigintDataConverter : public ColumnDataConverter
+{
+ longlong buf;
+ bool flip; /* is false when reading counter columns */
+public:
+ int cassandra_to_mariadb(const char *cass_data, int cass_data_len)
+ {
+ longlong tmp;
+ DBUG_ASSERT(cass_data_len == sizeof(longlong));
+ if (flip)
+ flip64(cass_data, (char*)&tmp);
+ else
+ memcpy(&tmp, cass_data, sizeof(longlong));
+ field->store(tmp);
+ return 0;
+ }
+
+ bool mariadb_to_cassandra(char **cass_data, int *cass_data_len)
+ {
+ longlong tmp= field->val_int();
+ if (flip)
+ flip64((const char*)&tmp, (char*)&buf);
+ else
+ memcpy(&buf, &tmp, sizeof(longlong));
+ *cass_data= (char*)&buf;
+ *cass_data_len=sizeof(longlong);
+ return false;
+ }
+ BigintDataConverter(bool flip_arg) : flip(flip_arg) {}
+ ~BigintDataConverter(){}
+};
+
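+/* Reverse the byte order of a 4-byte value (network <-> host order) */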
+static void flip32(const char *from, char* to)
+{
+ to[0]= from[3];
+ to[1]= from[2];
+ to[2]= from[1];
+ to[3]= from[0];
+}
+
+
+class TinyintDataConverter : public ColumnDataConverter
+{
+ char buf;
+public:
+ int cassandra_to_mariadb(const char *cass_data, int cass_data_len)
+ {
+ DBUG_ASSERT(cass_data_len == 1);
+ field->store(cass_data[0]);
+ return 0;
+ }
+
+ bool mariadb_to_cassandra(char **cass_data, int *cass_data_len)
+ {
+ buf= field->val_int()? 1 : 0; /* TODO: error handling? */
+ *cass_data= (char*)&buf;
+ *cass_data_len= 1;
+ return false;
+ }
+ ~TinyintDataConverter(){}
+};
+
+
+class Int32DataConverter : public ColumnDataConverter
+{
+ int32_t buf;
+public:
+ int cassandra_to_mariadb(const char *cass_data, int cass_data_len)
+ {
+ int32_t tmp;
+ DBUG_ASSERT(cass_data_len == sizeof(int32_t));
+ flip32(cass_data, (char*)&tmp);
+ field->store(tmp);
+ return 0;
+ }
+
+ bool mariadb_to_cassandra(char **cass_data, int *cass_data_len)
+ {
+ int32_t tmp= field->val_int();
+ flip32((const char*)&tmp, (char*)&buf);
+ *cass_data= (char*)&buf;
+ *cass_data_len=sizeof(int32_t);
+ return false;
+ }
+ ~Int32DataConverter(){}
+};
+
+
+class StringCopyConverter : public ColumnDataConverter
+{
+ String buf;
+ size_t max_length;
+public:
+ int cassandra_to_mariadb(const char *cass_data, int cass_data_len)
+ {
+ if ((size_t)cass_data_len > max_length)
+ return 1;
+ field->store(cass_data, cass_data_len,field->charset());
+ return 0;
+ }
+
+ bool mariadb_to_cassandra(char **cass_data, int *cass_data_len)
+ {
+ String *pstr= field->val_str(&buf);
+ *cass_data= (char*)pstr->ptr();
+ *cass_data_len= pstr->length();
+ return false;
+ }
+ StringCopyConverter(size_t max_length_arg) : max_length(max_length_arg) {}
+ ~StringCopyConverter(){}
+};
+
+
+class TimestampDataConverter : public ColumnDataConverter
+{
+ int64_t buf;
+public:
+ int cassandra_to_mariadb(const char *cass_data, int cass_data_len)
+ {
+ /* Cassandra data is milliseconds-since-epoch in network byte order */
+ int64_t tmp;
+ DBUG_ASSERT(cass_data_len==8);
+ flip64(cass_data, (char*)&tmp);
+ /*
+ store_TIME's arguments:
+ - seconds since epoch
+ - microsecond fraction of a second.
+ */
+ ((Field_timestamp*)field)->store_TIME(tmp / 1000, (tmp % 1000)*1000);
+ return 0;
+ }
+
+ bool mariadb_to_cassandra(char **cass_data, int *cass_data_len)
+ {
+ my_time_t ts_time;
+ ulong ts_microsec;
+ int64_t tmp;
+ ts_time= ((Field_timestamp*)field)->get_timestamp(&ts_microsec);
+
+ /* Cassandra needs milliseconds-since-epoch */
+ tmp= ((int64_t)ts_time) * 1000 + ts_microsec/1000;
+ flip64((const char*)&tmp, (char*)&buf);
+
+ *cass_data= (char*)&buf;
+ *cass_data_len= 8;
+ return false;
+ }
+ ~TimestampDataConverter(){}
+};
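+
+/*
+  Worked example of the arithmetic above: a Cassandra timestamp of
+  1346189025123 milliseconds splits into 1346189025 seconds and
+  123 ms = 123000 microseconds, which is what store_TIME() receives.
+*/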
+
+
+
+static int convert_hex_digit(const char c)
+{
+ int num;
+ if (c >= '0' && c <= '9')
+ num= c - '0';
+ else if (c >= 'A' && c <= 'F')
+ num= c - 'A' + 10;
+ else if (c >= 'a' && c <= 'f')
+ num= c - 'a' + 10;
+ else
+ return -1; /* Couldn't convert */
+ return num;
+}
+
+
+const char map2number[]="0123456789abcdef";
+
+static void convert_uuid2string(char *str, const char *cass_data)
+{
+ char *ptr= str;
+ /* UUID arrives as 16-byte number in network byte order */
+ for (uint i=0; i < 16; i++)
+ {
+ *(ptr++)= map2number[(cass_data[i] >> 4) & 0xF];
+ *(ptr++)= map2number[cass_data[i] & 0xF];
+ if (i == 3 || i == 5 || i == 7 || i == 9)
+ *(ptr++)= '-';
+ }
+ *ptr= 0;
+}
+
+static bool convert_string2uuid(char *buf, const char *str)
+{
+ int lower, upper;
+ for (uint i= 0; i < 16; i++)
+ {
+ if ((upper= convert_hex_digit(str[0])) == -1 ||
+ (lower= convert_hex_digit(str[1])) == -1)
+ {
+ return true;
+ }
+ buf[i]= lower | (upper << 4);
+ str += 2;
+ if (i == 3 || i == 5 || i == 7 || i == 9)
+ {
+ if (str[0] != '-')
+ return true;
+ str++;
+ }
+ }
+ return false;
+}
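+
+/*
+  Example of the two conversions above: the 16 bytes
+  00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff are rendered as
+  "00112233-4455-6677-8899-aabbccddeeff", and vice versa.
+*/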
+
+
+class UuidDataConverter : public ColumnDataConverter
+{
+ char buf[16]; /* Binary UUID representation */
+ String str_buf;
+public:
+ int cassandra_to_mariadb(const char *cass_data, int cass_data_len)
+ {
+ DBUG_ASSERT(cass_data_len==16);
+ char str[37];
+ convert_uuid2string(str, cass_data);
+ field->store(str, 36,field->charset());
+ return 0;
+ }
+
+ bool mariadb_to_cassandra(char **cass_data, int *cass_data_len)
+ {
+ String *uuid_str= field->val_str(&str_buf);
+
+ if (uuid_str->length() != 36)
+ return true;
+
+ if (convert_string2uuid(buf, (char*)uuid_str->c_ptr()))
+ return true;
+ *cass_data= buf;
+ *cass_data_len= 16;
+ return false;
+ }
+ ~UuidDataConverter(){}
+};
+
+/**
+  Converting dynamic column types to/from Cassandra types
+*/
+
+
+/**
+ Check and initialize (if it is needed) string MEM_ROOT
+*/
+static void alloc_strings_memroot(MEM_ROOT *mem_root)
+{
+ if (!alloc_root_inited(mem_root))
+ {
+ /*
+      This mem_root is used to allocate UUID strings (36 chars + \0), so
+      size its blocks to hold a batch of them
+ */
+ init_alloc_root(mem_root,
+ (36 + 1 + ALIGN_SIZE(sizeof(USED_MEM))) * 10 +
+ ALLOC_ROOT_MIN_BLOCK_SIZE,
+ (36 + 1 + ALIGN_SIZE(sizeof(USED_MEM))) * 10 +
+ ALLOC_ROOT_MIN_BLOCK_SIZE, MYF(MY_THREAD_SPECIFIC));
+ }
+}
+
+static void free_strings_memroot(MEM_ROOT *mem_root)
+{
+ if (alloc_root_inited(mem_root))
+ free_root(mem_root, MYF(0));
+}
+
+bool cassandra_to_dyncol_intLong(const char *cass_data,
+ int cass_data_len __attribute__((unused)),
+ DYNAMIC_COLUMN_VALUE *value,
+ MEM_ROOT *mem_root __attribute__((unused)))
+{
+ value->type= DYN_COL_INT;
+#ifdef WORDS_BIGENDIAN
+  value->x.long_value= *((longlong *)cass_data);
+#else
+ flip64(cass_data, (char *)&value->x.long_value);
+#endif
+ return 0;
+}
+
+bool dyncol_to_cassandraLong(DYNAMIC_COLUMN_VALUE *value,
+ char **cass_data, int *cass_data_len,
+ void* buff, void **freemem)
+{
+ longlong *tmp= (longlong *) buff;
+ enum enum_dyncol_func_result rc=
+ mariadb_dyncol_val_long(tmp, value);
+ if (rc < 0)
+ return true;
+ *cass_data_len= sizeof(longlong);
+#ifdef WORDS_BIGENDIAN
+ *cass_data= (char *)buff;
+#else
+ flip64((char *)buff, (char *)buff + sizeof(longlong));
+ *cass_data= (char *)buff + sizeof(longlong);
+#endif
+ *freemem= NULL;
+ return false;
+}
+
+bool cassandra_to_dyncol_intInt32(const char *cass_data,
+ int cass_data_len __attribute__((unused)),
+ DYNAMIC_COLUMN_VALUE *value,
+ MEM_ROOT *mem_root __attribute__((unused)))
+{
+ int32 tmp;
+ value->type= DYN_COL_INT;
+#ifdef WORDS_BIGENDIAN
+ tmp= *((int32 *)cass_data);
+#else
+ flip32(cass_data, (char *)&tmp);
+#endif
+ value->x.long_value= tmp;
+ return 0;
+}
+
+
+bool dyncol_to_cassandraInt32(DYNAMIC_COLUMN_VALUE *value,
+ char **cass_data, int *cass_data_len,
+ void* buff, void **freemem)
+{
+ longlong *tmp= (longlong *) ((char *)buff + sizeof(longlong));
+ enum enum_dyncol_func_result rc=
+ mariadb_dyncol_val_long(tmp, value);
+ if (rc < 0)
+ return true;
+ *cass_data_len= sizeof(int32);
+ *cass_data= (char *)buff;
+#ifdef WORDS_BIGENDIAN
+ *((int32 *) buff) = (int32) *tmp;
+#else
+ {
+ int32 tmp2= (int32) *tmp;
+ flip32((char *)&tmp2, (char *)buff);
+ }
+#endif
+ *freemem= NULL;
+ return false;
+}
+
+
+bool cassandra_to_dyncol_intCounter(const char *cass_data,
+ int cass_data_len __attribute__((unused)),
+ DYNAMIC_COLUMN_VALUE *value,
+ MEM_ROOT *mem_root __attribute__((unused)))
+{
+ value->type= DYN_COL_INT;
+ value->x.long_value= *((longlong *)cass_data);
+ return 0;
+}
+
+
+bool dyncol_to_cassandraCounter(DYNAMIC_COLUMN_VALUE *value,
+ char **cass_data, int *cass_data_len,
+ void* buff, void **freemem)
+{
+ longlong *tmp= (longlong *)buff;
+ enum enum_dyncol_func_result rc=
+ mariadb_dyncol_val_long(tmp, value);
+ if (rc < 0)
+ return true;
+ *cass_data_len= sizeof(longlong);
+ *cass_data= (char *)buff;
+ *freemem= NULL;
+ return false;
+}
+
+bool cassandra_to_dyncol_doubleFloat(const char *cass_data,
+ int cass_data_len __attribute__((unused)),
+ DYNAMIC_COLUMN_VALUE *value,
+ MEM_ROOT *mem_root __attribute__((unused)))
+{
+ value->type= DYN_COL_DOUBLE;
+ value->x.double_value= *((float *)cass_data);
+ return 0;
+}
+
+bool dyncol_to_cassandraFloat(DYNAMIC_COLUMN_VALUE *value,
+ char **cass_data, int *cass_data_len,
+ void* buff, void **freemem)
+{
+ double tmp;
+ enum enum_dyncol_func_result rc=
+ mariadb_dyncol_val_double(&tmp, value);
+ if (rc < 0)
+ return true;
+ *((float *)buff)= (float) tmp;
+ *cass_data_len= sizeof(float);
+ *cass_data= (char *)buff;
+ *freemem= NULL;
+ return false;
+}
+
+bool cassandra_to_dyncol_doubleDouble(const char *cass_data,
+ int cass_data_len __attribute__((unused)),
+ DYNAMIC_COLUMN_VALUE *value,
+ MEM_ROOT *mem_root
+ __attribute__((unused)))
+{
+ value->type= DYN_COL_DOUBLE;
+ value->x.double_value= *((double *)cass_data);
+ return 0;
+}
+
+bool dyncol_to_cassandraDouble(DYNAMIC_COLUMN_VALUE *value,
+ char **cass_data, int *cass_data_len,
+ void* buff, void **freemem)
+{
+ double *tmp= (double *)buff;
+ enum enum_dyncol_func_result rc=
+ mariadb_dyncol_val_double(tmp, value);
+ if (rc < 0)
+ return true;
+ *cass_data_len= sizeof(double);
+ *cass_data= (char *)buff;
+ *freemem= NULL;
+ return false;
+}
+
+bool cassandra_to_dyncol_strStr(const char *cass_data,
+ int cass_data_len,
+ DYNAMIC_COLUMN_VALUE *value,
+ CHARSET_INFO *cs)
+{
+ value->type= DYN_COL_STRING;
+ value->x.string.charset= cs;
+ value->x.string.value.str= (char *)cass_data;
+ value->x.string.value.length= cass_data_len;
+ return 0;
+}
+
+bool dyncol_to_cassandraStr(DYNAMIC_COLUMN_VALUE *value,
+ char **cass_data, int *cass_data_len,
+ void* buff, void **freemem, CHARSET_INFO *cs)
+{
+ DYNAMIC_STRING tmp;
+ if (init_dynamic_string(&tmp, NULL, 1024, 1024))
+ return 1;
+ enum enum_dyncol_func_result rc=
+ mariadb_dyncol_val_str(&tmp, value, cs, '\0');
+ if (rc < 0)
+ {
+ dynstr_free(&tmp);
+ return 1;
+ }
+ *cass_data_len= tmp.length;
+ *(cass_data)= tmp.str;
+ *freemem= tmp.str;
+ return 0;
+}
+
+bool cassandra_to_dyncol_strBytes(const char *cass_data,
+ int cass_data_len,
+ DYNAMIC_COLUMN_VALUE *value,
+ MEM_ROOT *mem_root __attribute__((unused)))
+{
+ return cassandra_to_dyncol_strStr(cass_data, cass_data_len, value,
+ &my_charset_bin);
+}
+
+bool dyncol_to_cassandraBytes(DYNAMIC_COLUMN_VALUE *value,
+ char **cass_data, int *cass_data_len,
+ void* buff, void **freemem)
+{
+ return dyncol_to_cassandraStr(value, cass_data, cass_data_len,
+ buff, freemem, &my_charset_bin);
+}
+
+bool cassandra_to_dyncol_strAscii(const char *cass_data,
+ int cass_data_len,
+ DYNAMIC_COLUMN_VALUE *value,
+ MEM_ROOT *mem_root __attribute__((unused)))
+{
+ return cassandra_to_dyncol_strStr(cass_data, cass_data_len, value,
+ &my_charset_latin1_bin);
+}
+
+bool dyncol_to_cassandraAscii(DYNAMIC_COLUMN_VALUE *value,
+ char **cass_data, int *cass_data_len,
+ void* buff, void **freemem)
+{
+ return dyncol_to_cassandraStr(value, cass_data, cass_data_len,
+ buff, freemem, &my_charset_latin1_bin);
+}
+
+bool cassandra_to_dyncol_strUTF8(const char *cass_data,
+ int cass_data_len,
+ DYNAMIC_COLUMN_VALUE *value,
+ MEM_ROOT *mem_root __attribute__((unused)))
+{
+ return cassandra_to_dyncol_strStr(cass_data, cass_data_len, value,
+ &my_charset_utf8_unicode_ci);
+}
+
+bool dyncol_to_cassandraUTF8(DYNAMIC_COLUMN_VALUE *value,
+ char **cass_data, int *cass_data_len,
+ void* buff, void **freemem)
+{
+ return dyncol_to_cassandraStr(value, cass_data, cass_data_len,
+ buff, freemem, &my_charset_utf8_unicode_ci);
+}
+
+bool cassandra_to_dyncol_strUUID(const char *cass_data,
+ int cass_data_len,
+ DYNAMIC_COLUMN_VALUE *value,
+ MEM_ROOT *mem_root)
+{
+ value->type= DYN_COL_STRING;
+ value->x.string.charset= &my_charset_bin;
+ alloc_strings_memroot(mem_root);
+ value->x.string.value.str= (char *)alloc_root(mem_root, 37);
+ if (!value->x.string.value.str)
+ {
+ value->x.string.value.length= 0;
+ return 1;
+ }
+ convert_uuid2string(value->x.string.value.str, cass_data);
+ value->x.string.value.length= 36;
+ return 0;
+}
+
+bool dyncol_to_cassandraUUID(DYNAMIC_COLUMN_VALUE *value,
+ char **cass_data, int *cass_data_len,
+ void* buff, void **freemem)
+{
+ DYNAMIC_STRING tmp;
+ if (init_dynamic_string(&tmp, NULL, 1024, 1024))
+ return true;
+ enum enum_dyncol_func_result rc=
+ mariadb_dyncol_val_str(&tmp, value, &my_charset_latin1_bin, '\0');
+ if (rc < 0 || tmp.length != 36 || convert_string2uuid((char *)buff, tmp.str))
+ {
+ dynstr_free(&tmp);
+ return true;
+ }
+
+  /* Return the 16-byte binary UUID built in buff; tmp.str is released
+     via *freemem below */
+  *cass_data_len= 16;
+  *(cass_data)= (char *)buff;
+ *freemem= tmp.str;
+ return 0;
+}
+
+bool cassandra_to_dyncol_intBool(const char *cass_data,
+ int cass_data_len,
+ DYNAMIC_COLUMN_VALUE *value,
+ MEM_ROOT *mem_root __attribute__((unused)))
+{
+ value->type= DYN_COL_INT;
+ value->x.long_value= (cass_data[0] ? 1 : 0);
+ return 0;
+}
+
+bool dyncol_to_cassandraBool(DYNAMIC_COLUMN_VALUE *value,
+ char **cass_data, int *cass_data_len,
+ void* buff, void **freemem)
+{
+ longlong tmp;
+ enum enum_dyncol_func_result rc=
+ mariadb_dyncol_val_long(&tmp, value);
+ if (rc < 0)
+ return true;
+ ((char *)buff)[0]= (tmp ? 1 : 0);
+ *cass_data_len= 1;
+ *(cass_data)= (char *)buff;
+ *freemem= 0;
+ return 0;
+}
+
+
+const char * const validator_bigint= "org.apache.cassandra.db.marshal.LongType";
+const char * const validator_int= "org.apache.cassandra.db.marshal.Int32Type";
+const char * const validator_counter= "org.apache.cassandra.db.marshal.CounterColumnType";
+
+const char * const validator_float= "org.apache.cassandra.db.marshal.FloatType";
+const char * const validator_double= "org.apache.cassandra.db.marshal.DoubleType";
+
+const char * const validator_blob= "org.apache.cassandra.db.marshal.BytesType";
+const char * const validator_ascii= "org.apache.cassandra.db.marshal.AsciiType";
+const char * const validator_text= "org.apache.cassandra.db.marshal.UTF8Type";
+
+const char * const validator_timestamp="org.apache.cassandra.db.marshal.DateType";
+
+const char * const validator_uuid= "org.apache.cassandra.db.marshal.UUIDType";
+
+const char * const validator_boolean= "org.apache.cassandra.db.marshal.BooleanType";
+
+/* VARINTs are stored as big-endian big numbers. */
+const char * const validator_varint= "org.apache.cassandra.db.marshal.IntegerType";
+const char * const validator_decimal= "org.apache.cassandra.db.marshal.DecimalType";
+
+
+static CASSANDRA_TYPE_DEF cassandra_types[]=
+{
+ {
+ validator_bigint,
+ &cassandra_to_dyncol_intLong,
+ &dyncol_to_cassandraLong
+ },
+ {
+ validator_int,
+ &cassandra_to_dyncol_intInt32,
+ &dyncol_to_cassandraInt32
+ },
+ {
+ validator_counter,
+    &cassandra_to_dyncol_intCounter,
+ &dyncol_to_cassandraCounter
+ },
+ {
+ validator_float,
+ &cassandra_to_dyncol_doubleFloat,
+ &dyncol_to_cassandraFloat
+ },
+ {
+ validator_double,
+ &cassandra_to_dyncol_doubleDouble,
+ &dyncol_to_cassandraDouble
+ },
+ {
+ validator_blob,
+ &cassandra_to_dyncol_strBytes,
+ &dyncol_to_cassandraBytes
+ },
+ {
+ validator_ascii,
+ &cassandra_to_dyncol_strAscii,
+ &dyncol_to_cassandraAscii
+ },
+ {
+ validator_text,
+ &cassandra_to_dyncol_strUTF8,
+ &dyncol_to_cassandraUTF8
+ },
+ {
+ validator_timestamp,
+ &cassandra_to_dyncol_intLong,
+ &dyncol_to_cassandraLong
+ },
+ {
+ validator_uuid,
+ &cassandra_to_dyncol_strUUID,
+ &dyncol_to_cassandraUUID
+ },
+ {
+ validator_boolean,
+ &cassandra_to_dyncol_intBool,
+ &dyncol_to_cassandraBool
+ },
+ {
+ validator_varint,
+ &cassandra_to_dyncol_strBytes,
+ &dyncol_to_cassandraBytes
+ },
+ {
+ validator_decimal,
+ &cassandra_to_dyncol_strBytes,
+ &dyncol_to_cassandraBytes
+ }
+};
+
+CASSANDRA_TYPE get_cassandra_type(const char *validator)
+{
+ CASSANDRA_TYPE rc;
+ switch(validator[32])
+ {
+ case 'L':
+ rc= CT_BIGINT;
+ break;
+ case 'I':
+ rc= (validator[35] == '3' ? CT_INT : CT_VARINT);
+ break;
+ case 'C':
+ rc= CT_COUNTER;
+ break;
+ case 'F':
+ rc= CT_FLOAT;
+ break;
+ case 'D':
+ switch (validator[33])
+ {
+ case 'o':
+ rc= CT_DOUBLE;
+ break;
+ case 'a':
+ rc= CT_TIMESTAMP;
+ break;
+ case 'e':
+ rc= CT_DECIMAL;
+ break;
+ default:
+ rc= CT_BLOB;
+ break;
+ }
+ break;
+ case 'B':
+ rc= (validator[33] == 'o' ? CT_BOOLEAN : CT_BLOB);
+ break;
+ case 'A':
+ rc= CT_ASCII;
+ break;
+ case 'U':
+ rc= (validator[33] == 'T' ? CT_TEXT : CT_UUID);
+ break;
+ default:
+ rc= CT_BLOB;
+ }
+ DBUG_ASSERT(strcmp(cassandra_types[rc].name, validator) == 0);
+ return rc;
+}
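+
+/*
+  Note on the dispatch above: every validator name carries the
+  32-character prefix "org.apache.cassandra.db.marshal.", so the switch
+  keys off characters at fixed offsets of the class name. For example,
+  validator[32] is 'L' for LongType, while Int32Type and IntegerType
+  both start with 'I' and are distinguished by validator[35]
+  ('3' vs 'e'). The DBUG_ASSERT verifies the chosen entry.
+*/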
+
+ColumnDataConverter *map_field_to_validator(Field *field, const char *validator_name)
+{
+ ColumnDataConverter *res= NULL;
+
+ switch(field->type()) {
+ case MYSQL_TYPE_TINY:
+ if (!strcmp(validator_name, validator_boolean))
+ {
+ res= new TinyintDataConverter;
+ break;
+ }
+ /* fall through: */
+ case MYSQL_TYPE_SHORT:
+ case MYSQL_TYPE_LONGLONG:
+ {
+ bool is_counter= false;
+ if (!strcmp(validator_name, validator_bigint) ||
+ !strcmp(validator_name, validator_timestamp) ||
+ (is_counter= !strcmp(validator_name, validator_counter)))
+ res= new BigintDataConverter(!is_counter);
+ break;
+ }
+ case MYSQL_TYPE_FLOAT:
+ if (!strcmp(validator_name, validator_float))
+ res= new FloatDataConverter;
+ break;
+
+ case MYSQL_TYPE_DOUBLE:
+ if (!strcmp(validator_name, validator_double))
+ res= new DoubleDataConverter;
+ break;
+
+ case MYSQL_TYPE_TIMESTAMP:
+ if (!strcmp(validator_name, validator_timestamp))
+ res= new TimestampDataConverter;
+ break;
+
+ case MYSQL_TYPE_STRING: // these are space padded CHAR(n) strings.
+ if (!strcmp(validator_name, validator_uuid) &&
+ field->real_type() == MYSQL_TYPE_STRING &&
+ field->field_length == 36)
+ {
+ // UUID maps to CHAR(36), its text representation
+ res= new UuidDataConverter;
+ break;
+ }
+ /* fall through: */
+ case MYSQL_TYPE_VAR_STRING:
+ case MYSQL_TYPE_VARCHAR:
+ {
+ /*
+ Cassandra's "varint" type is a binary-encoded arbitary-length
+ big-endian number.
+ - It can be mapped to VARBINARY(N), with sufficiently big N.
+ - If the value does not fit into N bytes, it is an error. We should not
+ truncate it, because that is just as good as returning garbage.
+ - varint should not be mapped to BINARY(N), because BINARY(N) values
+        are zero-padded, which amounts to multiplying the value by
+        2^k for some k.
+ */
+ if (field->type() == MYSQL_TYPE_VARCHAR &&
+ field->binary() &&
+ (!strcmp(validator_name, validator_varint) ||
+ !strcmp(validator_name, validator_decimal)))
+ {
+ res= new StringCopyConverter(field->field_length);
+ break;
+ }
+
+ if (!strcmp(validator_name, validator_blob) ||
+ !strcmp(validator_name, validator_ascii) ||
+ !strcmp(validator_name, validator_text))
+ {
+ res= new StringCopyConverter((size_t)-1);
+ }
+ break;
+ }
+ case MYSQL_TYPE_LONG:
+ if (!strcmp(validator_name, validator_int))
+ res= new Int32DataConverter;
+ break;
+
+ default:;
+ }
+ return res;
+}
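+
+/*
+  For instance (illustrative), a Cassandra 'varint' column can be
+  declared on the MariaDB side as
+
+    CREATE TABLE t (rowkey CHAR(36) PRIMARY KEY, v VARBINARY(32))
+    ENGINE=CASSANDRA keyspace='ks' column_family='cf';
+
+  Values longer than 32 bytes are then rejected by StringCopyConverter
+  rather than truncated.
+*/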
+
+
+bool ha_cassandra::setup_field_converters(Field **field_arg, uint n_fields)
+{
+ char *col_name;
+ int col_name_len;
+ char *col_type;
+ int col_type_len;
+ size_t ddl_fields= se->get_ddl_size();
+ const char *default_type= se->get_default_validator();
+ uint max_non_default_fields;
+ DBUG_ENTER("ha_cassandra::setup_field_converters");
+ DBUG_ASSERT(default_type);
+
+ DBUG_ASSERT(!field_converters);
+ DBUG_ASSERT(dyncol_set == 0 || dyncol_set == 1);
+
+ /*
+    Keep in mind that when dynamic columns are used, the SQL definition
+    contains one field (the dynamic-column blob) that is not described in
+    the Cassandra DDL; in addition, the key field is described separately.
+    That is why we use "n_fields - dyncol_set - 1" and "ddl_fields + 2".
+ */
+ max_non_default_fields= ddl_fields + 2 - n_fields;
+ if (ddl_fields < (n_fields - dyncol_set - 1))
+ {
+ se->print_error("Some of SQL fields were not mapped to Cassandra's fields");
+ my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
+ DBUG_RETURN(true);
+ }
+
+ /* allocate memory in one chunk */
+ size_t memsize= sizeof(ColumnDataConverter*) * n_fields +
+ (sizeof(LEX_STRING) + sizeof(CASSANDRA_TYPE_DEF))*
+ (dyncol_set ? max_non_default_fields : 0);
+ if (!(field_converters= (ColumnDataConverter**)my_malloc(memsize, MYF(0))))
+ DBUG_RETURN(true);
+ bzero(field_converters, memsize);
+ n_field_converters= n_fields;
+
+ if (dyncol_set)
+ {
+ special_type_field_converters=
+ (CASSANDRA_TYPE_DEF *)(field_converters + n_fields);
+ special_type_field_names=
+ ((LEX_STRING*)(special_type_field_converters + max_non_default_fields));
+
+ if (my_init_dynamic_array(&dynamic_values,
+ sizeof(DYNAMIC_COLUMN_VALUE),
+ DYNCOL_USUAL, DYNCOL_DELTA, MYF(0)))
+ DBUG_RETURN(true);
+ else
+ if (my_init_dynamic_array(&dynamic_names,
+ sizeof(LEX_STRING),
+ DYNCOL_USUAL, DYNCOL_DELTA,MYF(0)))
+ {
+ delete_dynamic(&dynamic_values);
+ DBUG_RETURN(true);
+ }
+ else
+ if (init_dynamic_string(&dynamic_rec, NULL,
+ DYNCOL_USUAL_REC, DYNCOL_DELTA_REC))
+ {
+ delete_dynamic(&dynamic_values);
+ delete_dynamic(&dynamic_names);
+ DBUG_RETURN(true);
+ }
+
+ /* Dynamic column field has special processing */
+ field_converters[dyncol_field]= NULL;
+
+ default_type_def= cassandra_types + get_cassandra_type(default_type);
+ }
+
+ se->first_ddl_column();
+ uint n_mapped= 0;
+ while (!se->next_ddl_column(&col_name, &col_name_len, &col_type,
+ &col_type_len))
+ {
+ Field **field;
+ uint i;
+ /* Mapping for the 1st field is already known */
+ for (field= field_arg + 1, i= 1; *field; field++, i++)
+ {
+ if ((!dyncol_set || dyncol_field != i) &&
+ !strcmp((*field)->field_name, col_name))
+ {
+ n_mapped++;
+ ColumnDataConverter **conv= field_converters + (*field)->field_index;
+ if (!(*conv= map_field_to_validator(*field, col_type)))
+ {
+ se->print_error("Failed to map column %s to datatype %s",
+ (*field)->field_name, col_type);
+ my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
+ DBUG_RETURN(true);
+ }
+ (*conv)->field= *field;
+ break;
+ }
+ }
+    if (dyncol_set && !(*field)) // no SQL field matched; treat as dynamic column
+ {
+ DBUG_PRINT("info",("Field not found: %s", col_name));
+ if (strcmp(col_type, default_type))
+ {
+ DBUG_PRINT("info",("Field '%s' non-default type: '%s'",
+ col_name, col_type));
+ special_type_field_names[n_special_type_fields].length= col_name_len;
+ special_type_field_names[n_special_type_fields].str= col_name;
+ special_type_field_converters[n_special_type_fields]=
+ cassandra_types[get_cassandra_type(col_type)];
+ n_special_type_fields++;
+ }
+ }
+ }
+
+ if (n_mapped != n_fields - 1 - dyncol_set)
+ {
+ Field *first_unmapped= NULL;
+    /* Find the first unmapped field */
+ for (uint i= 1; i < n_fields;i++)
+ {
+ if (!field_converters[i])
+ {
+ first_unmapped= field_arg[i];
+ break;
+ }
+ }
+ DBUG_ASSERT(first_unmapped);
+
+ se->print_error("Field `%s` could not be mapped to any field in Cassandra",
+ first_unmapped->field_name);
+ my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
+ DBUG_RETURN(true);
+ }
+
+ /*
+ Setup type conversion for row_key.
+ */
+ se->get_rowkey_type(&col_name, &col_type);
+ if (col_name && strcmp(col_name, (*field_arg)->field_name))
+ {
+ se->print_error("PRIMARY KEY column must match Cassandra's name '%s'",
+ col_name);
+ my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
+ DBUG_RETURN(true);
+ }
+ if (!col_name && strcmp("rowkey", (*field_arg)->field_name))
+ {
+ se->print_error("target column family has no key_alias defined, "
+ "PRIMARY KEY column must be named 'rowkey'");
+ my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
+ DBUG_RETURN(true);
+ }
+
+ if (col_type != NULL)
+ {
+ if (!(rowkey_converter= map_field_to_validator(*field_arg, col_type)))
+ {
+ se->print_error("Failed to map PRIMARY KEY to datatype %s", col_type);
+ my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
+ DBUG_RETURN(true);
+ }
+ rowkey_converter->field= *field_arg;
+ }
+ else
+ {
+ se->print_error("Cassandra's rowkey has no defined datatype (todo: support this)");
+ my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
+ DBUG_RETURN(true);
+ }
+
+ DBUG_RETURN(false);
+}
+
+
+void ha_cassandra::free_field_converters()
+{
+ delete rowkey_converter;
+ rowkey_converter= NULL;
+
+ if (dyncol_set)
+ {
+ delete_dynamic(&dynamic_values);
+ delete_dynamic(&dynamic_names);
+ dynstr_free(&dynamic_rec);
+ }
+ if (field_converters)
+ {
+ for (uint i=0; i < n_field_converters; i++)
+ if (field_converters[i])
+ {
+ DBUG_ASSERT(!dyncol_set || i != dyncol_field);
+ delete field_converters[i];
+ }
+ my_free(field_converters);
+ field_converters= NULL;
+ }
+}
+
+
+int ha_cassandra::index_init(uint idx, bool sorted)
+{
+ int ires;
+ if (!se && (ires= connect_and_check_options(table)))
+ return ires;
+ return 0;
+}
+
+void store_key_image_to_rec(Field *field, uchar *ptr, uint len);
+
+int ha_cassandra::index_read_map(uchar *buf, const uchar *key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag)
+{
+ int rc= 0;
+ DBUG_ENTER("ha_cassandra::index_read_map");
+
+ if (find_flag != HA_READ_KEY_EXACT)
+ {
+ DBUG_ASSERT(0); /* Non-equality lookups should never be done */
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+ }
+
+ uint key_len= calculate_key_len(table, active_index, key, keypart_map);
+ store_key_image_to_rec(table->field[0], (uchar*)key, key_len);
+
+ char *cass_key;
+ int cass_key_len;
+ my_bitmap_map *old_map;
+
+ old_map= dbug_tmp_use_all_columns(table, table->read_set);
+
+ if (rowkey_converter->mariadb_to_cassandra(&cass_key, &cass_key_len))
+ {
+ /* We get here when making lookups like uuid_column='not-an-uuid' */
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+ DBUG_RETURN(HA_ERR_KEY_NOT_FOUND);
+ }
+
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+
+ bool found;
+ if (se->get_slice(cass_key, cass_key_len, &found))
+ {
+ my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
+ rc= HA_ERR_INTERNAL_ERROR;
+ }
+
+ /* TODO: what if we're not reading all columns?? */
+ if (!found)
+ rc= HA_ERR_KEY_NOT_FOUND;
+ else
+ rc= read_cassandra_columns(false);
+
+ DBUG_RETURN(rc);
+}
+
+
+void ha_cassandra::print_conversion_error(const char *field_name,
+ char *cass_value,
+ int cass_value_len)
+{
+ char buf[32];
+ char *p= cass_value;
+ size_t i= 0;
+  for (; (i < sizeof(buf)-2) && (p < cass_value + cass_value_len); p++)
+ {
+ buf[i++]= map2number[(*p >> 4) & 0xF];
+ buf[i++]= map2number[*p & 0xF];
+ }
+ buf[i]=0;
+
+ se->print_error("Unable to convert value for field `%s` from Cassandra's data"
+ " format. Source data is %d bytes, 0x%s%s",
+ field_name, cass_value_len, buf,
+                  (i == sizeof(buf) - 2)? "..." : "");
+ my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
+}
+
+
+
+CASSANDRA_TYPE_DEF * ha_cassandra::get_cassandra_field_def(char *cass_name,
+ int cass_name_len)
+{
+ CASSANDRA_TYPE_DEF *type= default_type_def;
+ for(uint i= 0; i < n_special_type_fields; i++)
+ {
+ if (cass_name_len == (int)special_type_field_names[i].length &&
+ memcmp(cass_name, special_type_field_names[i].str,
+ cass_name_len) == 0)
+ {
+ type= special_type_field_converters + i;
+ break;
+ }
+ }
+ return type;
+}
+
+int ha_cassandra::read_cassandra_columns(bool unpack_pk)
+{
+ MEM_ROOT strings_root;
+ char *cass_name;
+ char *cass_value;
+ int cass_value_len, cass_name_len;
+ Field **field;
+ int res= 0;
+ ulong total_name_len= 0;
+
+ clear_alloc_root(&strings_root);
+ /*
+ cassandra_to_mariadb() calls will use field->store(...) methods, which
+ require that the column is in the table->write_set
+ */
+ my_bitmap_map *old_map;
+ old_map= dbug_tmp_use_all_columns(table, table->write_set);
+
+ /* Start with all fields being NULL */
+ for (field= table->field + 1; *field; field++)
+ (*field)->set_null();
+
+ while (!se->get_next_read_column(&cass_name, &cass_name_len,
+ &cass_value, &cass_value_len))
+ {
+    // Map to our column. TODO: use a hash for the lookup.
+ bool found= 0;
+ for (field= table->field + 1; *field; field++)
+ {
+ uint fieldnr= (*field)->field_index;
+ if ((!dyncol_set || dyncol_field != fieldnr) &&
+ !strcmp((*field)->field_name, cass_name))
+ {
+ found= 1;
+ (*field)->set_notnull();
+ if (field_converters[fieldnr]->cassandra_to_mariadb(cass_value,
+ cass_value_len))
+ {
+ print_conversion_error((*field)->field_name, cass_value,
+ cass_value_len);
+ res=1;
+ goto err;
+ }
+ break;
+ }
+ }
+ if (dyncol_set && !found)
+ {
+ DYNAMIC_COLUMN_VALUE val;
+ LEX_STRING nm;
+ CASSANDRA_TYPE_DEF *type= get_cassandra_field_def(cass_name,
+ cass_name_len);
+ nm.str= cass_name;
+ nm.length= cass_name_len;
+ if (nm.length > MAX_NAME_LENGTH)
+ {
+ se->print_error("Unable to convert value for field `%s`"
+ " from Cassandra's data format. Name"
+ " length exceed limit of %u: '%s'",
+ table->field[dyncol_field]->field_name,
+ (uint)MAX_NAME_LENGTH, cass_name);
+ my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
+ res=1;
+ goto err;
+ }
+ total_name_len+= cass_name_len;
+      if (total_name_len > MAX_TOTAL_NAME_LENGTH)
+ {
+ se->print_error("Unable to convert value for field `%s`"
+ " from Cassandra's data format. Sum of all names"
+ " length exceed limit of %lu",
+ table->field[dyncol_field]->field_name,
+ cass_name, (uint)MAX_TOTAL_NAME_LENGTH);
+ my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
+ res=1;
+ goto err;
+ }
+
+ if ((res= (*(type->cassandra_to_dynamic))(cass_value,
+ cass_value_len, &val,
+ &strings_root)) ||
+ insert_dynamic(&dynamic_names, (uchar *) &nm) ||
+ insert_dynamic(&dynamic_values, (uchar *) &val))
+ {
+ if (res)
+ {
+ print_conversion_error(cass_name, cass_value, cass_value_len);
+ }
+ free_strings_memroot(&strings_root);
+        // An out-of-memory error, if it happened, has already been reported
+ res= 1;
+ goto err;
+ }
+ }
+ }
+
+ dynamic_rec.length= 0;
+ if (dyncol_set)
+ {
+ if (mariadb_dyncol_create_many_named(&dynamic_rec,
+ dynamic_names.elements,
+ (LEX_STRING *)dynamic_names.buffer,
+ (DYNAMIC_COLUMN_VALUE *)
+ dynamic_values.buffer,
+ FALSE) < 0)
+ dynamic_rec.length= 0;
+
+ free_strings_memroot(&strings_root);
+ dynamic_values.elements= dynamic_names.elements= 0;
+
+ if (dynamic_rec.length == 0)
+ table->field[dyncol_field]->set_null();
+ else
+ {
+ Field_blob *blob= (Field_blob *)table->field[dyncol_field];
+ blob->set_notnull();
+ blob->store_length(dynamic_rec.length);
+ *((char **)(((char *)blob->ptr) + blob->pack_length_no_ptr()))=
+ dynamic_rec.str;
+ }
+ }
+
+ if (unpack_pk)
+ {
+ /* Unpack rowkey to primary key */
+ field= table->field;
+ (*field)->set_notnull();
+ se->get_read_rowkey(&cass_value, &cass_value_len);
+ if (rowkey_converter->cassandra_to_mariadb(cass_value, cass_value_len))
+ {
+ print_conversion_error((*field)->field_name, cass_value, cass_value_len);
+ res=1;
+ goto err;
+ }
+ }
+
+err:
+ dbug_tmp_restore_column_map(table->write_set, old_map);
+ return res;
+}
+
+int ha_cassandra::read_dyncol(uint *count,
+ DYNAMIC_COLUMN_VALUE **vals,
+ LEX_STRING **names,
+ String *valcol)
+{
+ String *strcol;
+ DYNAMIC_COLUMN col;
+
+ enum enum_dyncol_func_result rc;
+ DBUG_ENTER("ha_cassandra::read_dyncol");
+
+ Field *field= table->field[dyncol_field];
+ DBUG_ASSERT(field->type() == MYSQL_TYPE_BLOB);
+  /* It is a blob, so val_str() does not use the passed buffer */
+ strcol= field->val_str(NULL, valcol);
+ if (field->is_null())
+ {
+ *count= 0;
+ *names= 0;
+ *vals= 0;
+ DBUG_RETURN(0); // nothing to write
+ }
+ /*
+    mariadb_dyncol_unpack() only reads the string, so we can cheat here
+    with a shallow assignment instead of copying
+ */
+ bzero(&col, sizeof(col));
+ col.str= (char *)strcol->ptr();
+ col.length= strcol->length();
+ if ((rc= mariadb_dyncol_unpack(&col, count, names, vals)) < 0)
+ {
+ dynamic_column_error_message(rc);
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+ DBUG_RETURN(0);
+}
+
+int ha_cassandra::write_dynamic_row(uint count,
+ DYNAMIC_COLUMN_VALUE *vals,
+ LEX_STRING *names)
+{
+ uint i;
+ DBUG_ENTER("ha_cassandra::write_dynamic_row");
+ DBUG_ASSERT(dyncol_set);
+
+
+ for (i= 0; i < count; i++)
+ {
+ char buff[16];
+ CASSANDRA_TYPE_DEF *type;
+ void *freemem= NULL;
+ char *cass_data;
+ int cass_data_len;
+
+ DBUG_PRINT("info", ("field %*s", (int)names[i].length, names[i].str));
+ type= get_cassandra_field_def(names[i].str, (int) names[i].length);
+ if ((*type->dynamic_to_cassandra)(vals +i, &cass_data, &cass_data_len,
+ buff, &freemem))
+ {
+ my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
+ names[i].str, insert_lineno);
+ DBUG_RETURN(HA_ERR_GENERIC);
+ }
+ se->add_insert_column(names[i].str, names[i].length,
+ cass_data, cass_data_len);
+ if (freemem)
+ my_free(freemem);
+ }
+ DBUG_RETURN(0);
+}
+
+void ha_cassandra::free_dynamic_row(DYNAMIC_COLUMN_VALUE **vals,
+ LEX_STRING **names)
+{
+ if (*vals)
+ {
+ my_free(*vals);
+ *vals= 0;
+ }
+ if (*names)
+ {
+ my_free(*names);
+ *names= 0;
+ }
+}
+
+int ha_cassandra::write_row(uchar *buf)
+{
+ my_bitmap_map *old_map;
+ int ires;
+ DBUG_ENTER("ha_cassandra::write_row");
+
+ if (!se && (ires= connect_and_check_options(table)))
+ DBUG_RETURN(ires);
+
+ if (!doing_insert_batch)
+ se->clear_insert_buffer();
+
+ old_map= dbug_tmp_use_all_columns(table, table->read_set);
+
+ insert_lineno++;
+
+ /* Convert the key */
+ char *cass_key;
+ int cass_key_len;
+ if (rowkey_converter->mariadb_to_cassandra(&cass_key, &cass_key_len))
+ {
+ my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
+ rowkey_converter->field->field_name, insert_lineno);
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+ se->start_row_insert(cass_key, cass_key_len);
+
+ /* Convert other fields */
+ for (uint i= 1; i < table->s->fields; i++)
+ {
+ char *cass_data;
+ int cass_data_len;
+ if (dyncol_set && dyncol_field == i)
+ {
+ String valcol;
+ DYNAMIC_COLUMN_VALUE *vals;
+ LEX_STRING *names;
+ uint count;
+ int rc;
+ DBUG_ASSERT(field_converters[i] == NULL);
+ if (!(rc= read_dyncol(&count, &vals, &names, &valcol)))
+ rc= write_dynamic_row(count, vals, names);
+ free_dynamic_row(&vals, &names);
+ if (rc)
+ {
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+ DBUG_RETURN(rc);
+ }
+ }
+ else
+ {
+ if (field_converters[i]->mariadb_to_cassandra(&cass_data,
+ &cass_data_len))
+ {
+ my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
+ field_converters[i]->field->field_name, insert_lineno);
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+ se->add_insert_column(field_converters[i]->field->field_name, 0,
+ cass_data, cass_data_len);
+ }
+ }
+
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+
+ bool res;
+
+ if (doing_insert_batch)
+ {
+ res= 0;
+ if (++insert_rows_batched >= THDVAR(table->in_use, insert_batch_size))
+ {
+ res= se->do_insert();
+ insert_rows_batched= 0;
+ }
+ }
+ else
+ res= se->do_insert();
+
+ if (res)
+ my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
+
+ DBUG_RETURN(res? HA_ERR_INTERNAL_ERROR: 0);
+}
+
+
+void ha_cassandra::start_bulk_insert(ha_rows rows, uint flags)
+{
+ int ires;
+ if (!se && (ires= connect_and_check_options(table)))
+ return;
+
+ doing_insert_batch= true;
+ insert_rows_batched= 0;
+
+ se->clear_insert_buffer();
+}
+
+
+int ha_cassandra::end_bulk_insert()
+{
+ DBUG_ENTER("ha_cassandra::end_bulk_insert");
+
+ /* Flush out the insert buffer */
+ doing_insert_batch= false;
+ bool bres= se->do_insert();
+ se->clear_insert_buffer();
+
+ DBUG_RETURN(bres? HA_ERR_INTERNAL_ERROR: 0);
+}
+
+
+int ha_cassandra::rnd_init(bool scan)
+{
+ bool bres;
+ int ires;
+ DBUG_ENTER("ha_cassandra::rnd_init");
+
+ if (!se && (ires= connect_and_check_options(table)))
+ DBUG_RETURN(ires);
+
+ if (!scan)
+ {
+    /* Prepare for rnd_pos() calls. We don't need to do anything. */
+ DBUG_RETURN(0);
+ }
+
+ if (dyncol_set)
+ {
+ se->clear_read_all_columns();
+ }
+ else
+ {
+ se->clear_read_columns();
+ for (uint i= 1; i < table->s->fields; i++)
+ se->add_read_column(table->field[i]->field_name);
+ }
+
+ se->read_batch_size= THDVAR(table->in_use, rnd_batch_size);
+ bres= se->get_range_slices(false);
+ if (bres)
+ my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
+
+ DBUG_RETURN(bres? HA_ERR_INTERNAL_ERROR: 0);
+}
+
+
+int ha_cassandra::rnd_end()
+{
+ DBUG_ENTER("ha_cassandra::rnd_end");
+
+ se->finish_reading_range_slices();
+ DBUG_RETURN(0);
+}
+
+
+int ha_cassandra::rnd_next(uchar *buf)
+{
+ int rc;
+ bool reached_eof;
+ DBUG_ENTER("ha_cassandra::rnd_next");
+
+ // Unpack and return the next record.
+ if (se->get_next_range_slice_row(&reached_eof))
+ {
+ rc= HA_ERR_INTERNAL_ERROR;
+ }
+ else
+ {
+ if (reached_eof)
+ rc= HA_ERR_END_OF_FILE;
+ else
+ rc= read_cassandra_columns(true);
+ }
+
+ DBUG_RETURN(rc);
+}
+
+
+int ha_cassandra::delete_all_rows()
+{
+ bool bres;
+ int ires;
+ DBUG_ENTER("ha_cassandra::delete_all_rows");
+
+ if (!se && (ires= connect_and_check_options(table)))
+ DBUG_RETURN(ires);
+
+ bres= se->truncate();
+
+ if (bres)
+ my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
+
+ DBUG_RETURN(bres? HA_ERR_INTERNAL_ERROR: 0);
+}
+
+
+int ha_cassandra::delete_row(const uchar *buf)
+{
+ bool bres;
+ DBUG_ENTER("ha_cassandra::delete_row");
+
+ bres= se->remove_row();
+
+ if (bres)
+ my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
+
+ DBUG_RETURN(bres? HA_ERR_INTERNAL_ERROR: 0);
+}
+
+
+int ha_cassandra::info(uint flag)
+{
+ DBUG_ENTER("ha_cassandra::info");
+
+ if (!table)
+ DBUG_RETURN(1);
+
+ if (flag & HA_STATUS_VARIABLE)
+ {
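+ /*
+ There is no inexpensive way to get a row count from Cassandra, so
+ report a fixed placeholder estimate for the optimizer.
+ */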
+ stats.records= 1000;
+ stats.deleted= 0;
+ }
+ if (flag & HA_STATUS_CONST)
+ {
+ ref_length= table->field[0]->key_length();
+ }
+
+ DBUG_RETURN(0);
+}
+
+
+void key_copy(uchar *to_key, uchar *from_record, KEY *key_info,
+ uint key_length, bool with_zerofill);
+
+
+void ha_cassandra::position(const uchar *record)
+{
+ DBUG_ENTER("ha_cassandra::position");
+
+ /* Copy the primary key to rowid */
+ key_copy(ref, (uchar*)record, &table->key_info[0],
+ table->field[0]->key_length(), true);
+
+ DBUG_VOID_RETURN;
+}
+
+
+int ha_cassandra::rnd_pos(uchar *buf, uchar *pos)
+{
+ int rc;
+ DBUG_ENTER("ha_cassandra::rnd_pos");
+
+ int save_active_index= active_index;
+ active_index= 0; /* The primary key */
+ rc= index_read_map(buf, pos, key_part_map(1), HA_READ_KEY_EXACT);
+
+ active_index= save_active_index;
+
+ DBUG_RETURN(rc);
+}
+
+
+int ha_cassandra::reset()
+{
+ doing_insert_batch= false;
+ insert_lineno= 0;
+ if (se)
+ {
+ se->set_consistency_levels(THDVAR(table->in_use, read_consistency),
+ THDVAR(table->in_use, write_consistency));
+ }
+ return 0;
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// MRR implementation
+/////////////////////////////////////////////////////////////////////////////
+
+
+/*
+ - The key can only be the primary key.
+ - Only equality ranges are allowed.
+ - anything else?
+*/
+ha_rows ha_cassandra::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
+ void *seq_init_param,
+ uint n_ranges, uint *bufsz,
+ uint *flags, COST_VECT *cost)
+{
+ /* No support for const ranges so far */
+ return HA_POS_ERROR;
+}
+
+
+ha_rows ha_cassandra::multi_range_read_info(uint keyno, uint n_ranges, uint keys,
+ uint key_parts, uint *bufsz,
+ uint *flags, COST_VECT *cost)
+{
+ /* Can only be equality lookups on the primary key... */
+ // TODO anything else?
+ *flags &= ~HA_MRR_USE_DEFAULT_IMPL;
+ *flags |= HA_MRR_NO_ASSOCIATION;
+
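+ /* A small constant cost estimate; there are no real statistics to draw on. */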
+ return 10;
+}
+
+
+int ha_cassandra::multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param,
+ uint n_ranges, uint mode, HANDLER_BUFFER *buf)
+{
+ int res;
+ mrr_iter= seq->init(seq_init_param, n_ranges, mode);
+ mrr_funcs= *seq;
+ res= mrr_start_read();
+ return (res? HA_ERR_INTERNAL_ERROR: 0);
+}
+
+
+bool ha_cassandra::mrr_start_read()
+{
+ uint key_len;
+
+ my_bitmap_map *old_map;
+ old_map= dbug_tmp_use_all_columns(table, table->read_set);
+
+ se->new_lookup_keys();
+
+ while (!(source_exhausted= mrr_funcs.next(mrr_iter, &mrr_cur_range)))
+ {
+ char *cass_key;
+ int cass_key_len;
+
+ DBUG_ASSERT(mrr_cur_range.range_flag & EQ_RANGE);
+
+ uchar *key= (uchar*)mrr_cur_range.start_key.key;
+ key_len= mrr_cur_range.start_key.length;
+ //key_len= calculate_key_len(table, active_index, key, keypart_map); // NEED THIS??
+ store_key_image_to_rec(table->field[0], (uchar*)key, key_len);
+
+ rowkey_converter->mariadb_to_cassandra(&cass_key, &cass_key_len);
+
+ // Primitive buffer control
+ if (se->add_lookup_key(cass_key, cass_key_len) >
+ THDVAR(table->in_use, multiget_batch_size))
+ break;
+ }
+
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+
+ return se->multiget_slice();
+}
+
+
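+/*
+ multi_range_read_next() drains the current multiget result set; when
+ the result set is exhausted but the range sequence is not,
+ mrr_start_read() is called again to collect the next batch of lookup
+ keys (bounded by @@cassandra_multiget_batch_size) and issue another
+ multiget_slice() call.
+*/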
+int ha_cassandra::multi_range_read_next(range_id_t *range_info)
+{
+ int res;
+ while(1)
+ {
+ if (!se->get_next_multiget_row())
+ {
+ res= read_cassandra_columns(true);
+ break;
+ }
+ else
+ {
+ if (source_exhausted)
+ {
+ res= HA_ERR_END_OF_FILE;
+ break;
+ }
+ else
+ {
+ if (mrr_start_read())
+ {
+ res= HA_ERR_INTERNAL_ERROR;
+ break;
+ }
+ }
+ }
+ /*
+ We get here if we've refilled the buffer and done another read. Try
+ reading from results again
+ */
+ }
+ return res;
+}
+
+
+int ha_cassandra::multi_range_read_explain_info(uint mrr_mode, char *str, size_t size)
+{
+ const char *mrr_str= "multiget_slice";
+
+ if (!(mrr_mode & HA_MRR_USE_DEFAULT_IMPL))
+ {
+ uint mrr_str_len= strlen(mrr_str);
+ uint copy_len= min(mrr_str_len, size);
+ memcpy(str, mrr_str, copy_len);
+ return copy_len;
+ }
+ return 0;
+}
+
+
+class Column_name_enumerator_impl : public Column_name_enumerator
+{
+ ha_cassandra *obj;
+ uint idx;
+public:
+ Column_name_enumerator_impl(ha_cassandra *obj_arg) : obj(obj_arg), idx(1) {}
+ const char* get_next_name()
+ {
+ if (idx == obj->table->s->fields)
+ return NULL;
+ else
+ return obj->table->field[idx++]->field_name;
+ }
+};
+
+
+int ha_cassandra::update_row(const uchar *old_data, uchar *new_data)
+{
+ DYNAMIC_COLUMN_VALUE *oldvals, *vals;
+ LEX_STRING *oldnames, *names;
+ uint oldcount, count;
+ String oldvalcol, valcol;
+ my_bitmap_map *old_map;
+ int res;
+ DBUG_ENTER("ha_cassandra::update_row");
+ /* Currently, it is guaranteed that new_data == table->record[0] */
+ DBUG_ASSERT(new_data == table->record[0]);
+ /* For now, just rewrite the full record */
+ se->clear_insert_buffer();
+
+ old_map= dbug_tmp_use_all_columns(table, table->read_set);
+
+ char *old_key;
+ int old_key_len;
+ se->get_read_rowkey(&old_key, &old_key_len);
+
+ /* Get the key we're going to write */
+ char *new_key;
+ int new_key_len;
+ if (rowkey_converter->mariadb_to_cassandra(&new_key, &new_key_len))
+ {
+ my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
+ rowkey_converter->field->field_name, insert_lineno);
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+
+ /*
+ Compare it to the key we've read. For all types that Cassandra supports,
+ binary byte-wise comparison can be used
+ */
+ bool new_primary_key;
+ if (new_key_len != old_key_len || memcmp(old_key, new_key, new_key_len))
+ new_primary_key= true;
+ else
+ new_primary_key= false;
+
+ if (dyncol_set)
+ {
+ Field *field= table->field[dyncol_field];
+ /* move to get old_data */
+ my_ptrdiff_t diff;
+ diff= (my_ptrdiff_t) (old_data - new_data);
+ field->move_field_offset(diff); // Points now at old_data
+ if ((res= read_dyncol(&oldcount, &oldvals, &oldnames, &oldvalcol)))
+ {
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+ DBUG_RETURN(res);
+ }
+ field->move_field_offset(-diff); // back to new_data
+ if ((res= read_dyncol(&count, &vals, &names, &valcol)))
+ {
+ free_dynamic_row(&oldvals, &oldnames);
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+ DBUG_RETURN(res);
+ }
+ }
+
+ if (new_primary_key)
+ {
+ /*
+ Primary key value changed. This is essentially a DELETE + INSERT.
+ Add a DELETE operation into the batch
+ */
+ Column_name_enumerator_impl name_enumerator(this);
+ se->add_row_deletion(old_key, old_key_len, &name_enumerator,
+ oldnames,
+ (dyncol_set ? oldcount : 0));
+ oldcount= 0; // they will be deleted
+ }
+
+ se->start_row_insert(new_key, new_key_len);
+
+ /* Convert other fields */
+ for (uint i= 1; i < table->s->fields; i++)
+ {
+ char *cass_data;
+ int cass_data_len;
+ if (dyncol_set && dyncol_field == i)
+ {
+ DBUG_ASSERT(field_converters[i] == NULL);
+ if ((res= write_dynamic_row(count, vals, names)))
+ goto err;
+ }
+ else
+ {
+ if (field_converters[i]->mariadb_to_cassandra(&cass_data, &cass_data_len))
+ {
+ my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
+ field_converters[i]->field->field_name, insert_lineno);
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ }
+ se->add_insert_column(field_converters[i]->field->field_name, 0,
+ cass_data, cass_data_len);
+ }
+ }
+ if (dyncol_set)
+ {
+ /* find removed fields */
+ uint i= 0, j= 0;
+ /* both arrays are sorted */
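+ /*
+ Example: oldnames= {a,b,c}, names= {a,c} -- 'b' has no match in the
+ new name array, so a per-column delete is issued for it below.
+ */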
+ for(; i < oldcount; i++)
+ {
+ int scmp= 0;
+ while (j < count &&
+ (scmp = mariadb_dyncol_column_cmp_named(names + j,
+ oldnames + i)) < 0)
+ j++;
+ if (j < count &&
+ scmp == 0)
+ j++;
+ else
+ se->add_insert_delete_column(oldnames[i].str, oldnames[i].length);
+ }
+ }
+
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+
+ res= se->do_insert();
+
+ if (res)
+ my_error(ER_INTERNAL_ERROR, MYF(0), se->error_str());
+
+err:
+ if (dyncol_set)
+ {
+ free_dynamic_row(&oldvals, &oldnames);
+ free_dynamic_row(&vals, &names);
+ }
+
+ DBUG_RETURN(res? HA_ERR_INTERNAL_ERROR: 0);
+}
+
+
+/*
+ We can't really have any locks for the Cassandra Storage Engine. We're
+ reading from a Cassandra cluster, and other clients can asynchronously
+ modify the data.
+
+ We could enforce locking within this process, but that would not be useful.
+
+ Thus, store_lock() should express that:
+ - Writes do not block other writes
+ - Reads should not block anything either, including INSERTs.
+*/
+THR_LOCK_DATA **ha_cassandra::store_lock(THD *thd,
+ THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type)
+{
+ DBUG_ENTER("ha_cassandra::store_lock");
+ if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
+ {
+ /* Writes allow other writes */
+ if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
+ lock_type <= TL_WRITE))
+ lock_type = TL_WRITE_ALLOW_WRITE;
+
+ /* Reads allow everything, including INSERTs */
+ if (lock_type == TL_READ_NO_INSERT)
+ lock_type = TL_READ;
+
+ lock.type= lock_type;
+ }
+ *to++= &lock;
+ DBUG_RETURN(to);
+}
+
+
+ha_rows ha_cassandra::records_in_range(uint inx, key_range *min_key,
+ key_range *max_key)
+{
+ DBUG_ENTER("ha_cassandra::records_in_range");
+ DBUG_RETURN(HA_POS_ERROR); /* Range scans are not supported */
+}
+
+
+/**
+ check_if_incompatible_data() is called when ALTER TABLE cannot otherwise
+ detect whether the new and old table definitions are compatible.
+
+ @details If there are no other explicit signs, like a changed number of
+ fields, this function will be called by compare_tables()
+ (sql/sql_tables.cc) to decide whether to rewrite the whole table or only
+ the .frm file.
+*/
+
+bool ha_cassandra::check_if_incompatible_data(HA_CREATE_INFO *info,
+ uint table_changes)
+{
+ DBUG_ENTER("ha_cassandra::check_if_incompatible_data");
+ /* Checked: we deliberately keep this empty for the Cassandra SE. */
+ DBUG_RETURN(COMPATIBLE_DATA_YES);
+}
+
+
+void Cassandra_se_interface::print_error(const char *format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+ // it's not a problem if output was truncated
+ my_vsnprintf(err_buffer, sizeof(err_buffer), format, ap);
+ va_end(ap);
+}
+
+
+struct st_mysql_storage_engine cassandra_storage_engine=
+{ MYSQL_HANDLERTON_INTERFACE_VERSION };
+
+static SHOW_VAR cassandra_status_variables[]= {
+ {"Cassandra_row_inserts",
+ (char*) &cassandra_counters.row_inserts, SHOW_LONG},
+ {"Cassandra_row_insert_batches",
+ (char*) &cassandra_counters.row_insert_batches, SHOW_LONG},
+
+ {"Cassandra_multiget_keys_scanned",
+ (char*) &cassandra_counters.multiget_keys_scanned, SHOW_LONG},
+ {"Cassandra_multiget_reads",
+ (char*) &cassandra_counters.multiget_reads, SHOW_LONG},
+ {"Cassandra_multiget_rows_read",
+ (char*) &cassandra_counters.multiget_rows_read, SHOW_LONG},
+
+ {"Cassandra_timeout_exceptions",
+ (char*) &cassandra_counters.timeout_exceptions, SHOW_LONG},
+ {"Cassandra_unavailable_exceptions",
+ (char*) &cassandra_counters.unavailable_exceptions, SHOW_LONG},
+ {NullS, NullS, SHOW_LONG}
+};
+
+
+
+
+maria_declare_plugin(cassandra)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &cassandra_storage_engine,
+ "CASSANDRA",
+ "Monty Program Ab",
+ "Cassandra storage engine",
+ PLUGIN_LICENSE_GPL,
+ cassandra_init_func, /* Plugin Init */
+ cassandra_done_func, /* Plugin Deinit */
+ 0x0001, /* version number (0.1) */
+ cassandra_status_variables, /* status variables */
+ cassandra_system_variables, /* system variables */
+ "0.1", /* string version */
+ MariaDB_PLUGIN_MATURITY_EXPERIMENTAL /* maturity */
+}
+maria_declare_plugin_end;
diff --git a/storage/cassandra/ha_cassandra.h b/storage/cassandra/ha_cassandra.h
new file mode 100644
index 00000000000..a57a754979c
--- /dev/null
+++ b/storage/cassandra/ha_cassandra.h
@@ -0,0 +1,275 @@
+/*
+ Copyright (c) 2012, Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#ifdef USE_PRAGMA_INTERFACE
+#pragma interface /* gcc class implementation */
+#endif
+
+
+#include "my_global.h" /* ulonglong */
+#include "thr_lock.h" /* THR_LOCK, THR_LOCK_DATA */
+#include "handler.h" /* handler */
+#include "my_base.h" /* ha_rows */
+
+#include "cassandra_se.h"
+
+/** @brief
+ CASSANDRA_SHARE is a structure that will be shared among all open handlers.
+ It implements the minimum of what you will probably need.
+*/
+typedef struct st_cassandra_share {
+ char *table_name;
+ uint table_name_length,use_count;
+ mysql_mutex_t mutex;
+ THR_LOCK lock;
+} CASSANDRA_SHARE;
+
+class ColumnDataConverter;
+struct st_dynamic_column_value;
+typedef struct st_dynamic_column_value DYNAMIC_COLUMN_VALUE;
+
+struct ha_table_option_struct;
+
+
+typedef bool (* CAS2DYN_CONVERTER)(const char *cass_data,
+ int cass_data_len,
+ struct st_dynamic_column_value *value,
+ MEM_ROOT *mem_root);
+typedef bool (* DYN2CAS_CONVERTER)(struct st_dynamic_column_value *value,
+ char **cass_data,
+ int *cass_data_len,
+ void *buf, void **freemem);
+struct cassandra_type_def
+{
+ const char *name;
+ CAS2DYN_CONVERTER cassandra_to_dynamic;
+ DYN2CAS_CONVERTER dynamic_to_cassandra;
+};
+
+typedef struct cassandra_type_def CASSANDRA_TYPE_DEF;
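+
+/*
+ Illustrative sketch only (the converter names here are hypothetical):
+ an entry pairs a Cassandra validator class name with its two converter
+ functions, e.g.
+ { "org.apache.cassandra.db.marshal.Int32Type",
+ cassandra_to_dyncol_int, dyncol_to_cassandra_int }
+*/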
+
+enum cassandra_type_enum {CT_BIGINT, CT_INT, CT_COUNTER, CT_FLOAT, CT_DOUBLE,
+ CT_BLOB, CT_ASCII, CT_TEXT, CT_TIMESTAMP, CT_UUID, CT_BOOLEAN, CT_VARINT,
+ CT_DECIMAL};
+
+typedef enum cassandra_type_enum CASSANDRA_TYPE;
+
+
+
+/** @brief
+ Class definition for the storage engine
+*/
+class ha_cassandra: public handler
+{
+ friend class Column_name_enumerator_impl;
+ THR_LOCK_DATA lock; ///< MySQL lock
+ CASSANDRA_SHARE *share; ///< Shared lock info
+
+ Cassandra_se_interface *se;
+
+ /* description of static part of the table definition */
+ ColumnDataConverter **field_converters;
+ uint n_field_converters;
+
+ CASSANDRA_TYPE_DEF *default_type_def;
+ /* description of dynamic columns part */
+ CASSANDRA_TYPE_DEF *special_type_field_converters;
+ LEX_STRING *special_type_field_names;
+ uint n_special_type_fields;
+ DYNAMIC_ARRAY dynamic_values, dynamic_names;
+ DYNAMIC_STRING dynamic_rec;
+
+ ColumnDataConverter *rowkey_converter;
+
+ bool setup_field_converters(Field **field, uint n_fields);
+ void free_field_converters();
+
+ int read_cassandra_columns(bool unpack_pk);
+ int check_table_options(struct ha_table_option_struct* options);
+
+ bool doing_insert_batch;
+ ha_rows insert_rows_batched;
+
+ uint dyncol_field;
+ bool dyncol_set;
+
+ /* Used to produce 'wrong column %s at row %lu' warnings */
+ ha_rows insert_lineno;
+ void print_conversion_error(const char *field_name,
+ char *cass_value, int cass_value_len);
+ int connect_and_check_options(TABLE *table_arg);
+public:
+ ha_cassandra(handlerton *hton, TABLE_SHARE *table_arg);
+ ~ha_cassandra()
+ {
+ free_field_converters();
+ delete se;
+ }
+
+ /** @brief
+ The name that will be used for display purposes.
+ */
+ const char *table_type() const { return "CASSANDRA"; }
+
+ /** @brief
+ The name of the index type that will be used for display.
+ Don't implement this method unless you really have indexes.
+ */
+ const char *index_type(uint inx) { return "HASH"; }
+
+ /** @brief
+ The file extensions.
+ */
+ const char **bas_ext() const;
+
+ /** @brief
+ This is a list of flags that indicate what functionality the storage engine
+ implements. The current table flags are documented in handler.h
+ */
+ ulonglong table_flags() const
+ {
+ return HA_BINLOG_STMT_CAPABLE |
+ HA_REC_NOT_IN_SEQ |
+ HA_NO_TRANSACTIONS |
+ HA_REQUIRE_PRIMARY_KEY |
+ HA_PRIMARY_KEY_IN_READ_INDEX |
+ HA_PRIMARY_KEY_REQUIRED_FOR_POSITION |
+ HA_NO_AUTO_INCREMENT |
+ HA_TABLE_SCAN_ON_INDEX;
+ }
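+
+ /*
+ Informal rationale for the flags above: every Cassandra row is
+ addressed by its row key, hence HA_REQUIRE_PRIMARY_KEY and the
+ primary-key-in-index flags; Cassandra provides neither transactions
+ nor auto-increment counters, hence HA_NO_TRANSACTIONS and
+ HA_NO_AUTO_INCREMENT.
+ */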
+
+ /** @brief
+ This is a bitmap of flags that indicates how the storage engine
+ implements indexes. The current index flags are documented in
+ handler.h. If you do not implement indexes, just return zero here.
+
+ @details
+ part is the key part to check. First key part is 0.
+ If all_parts is set, MySQL wants to know the flags for the combined
+ index, up to and including 'part'.
+ */
+ ulong index_flags(uint inx, uint part, bool all_parts) const
+ {
+ return 0;
+ }
+
+ /** @brief
+ unireg.cc will call max_supported_record_length(), max_supported_keys(),
+ max_supported_key_parts() and max_supported_key_length()
+ to make sure that the storage engine can handle the data it is about to
+ send. Return *real* limits of your storage engine here; MySQL will do
+ min(your_limits, MySQL_limits) automatically.
+ */
+ uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
+
+ /* Support only one Primary Key, for now */
+ uint max_supported_keys() const { return 1; }
+ uint max_supported_key_parts() const { return 1; }
+
+ /** @brief
+ unireg.cc will call this to make sure that the storage engine can handle
+ the data it is about to send. Return *real* limits of your storage engine
+ here; MySQL will do min(your_limits, MySQL_limits) automatically.
+
+ @details
+ There is no need to implement ..._key_... methods if your engine doesn't
+ support indexes.
+ */
+ uint max_supported_key_length() const { return 16*1024; /* just to return something*/ }
+
+ int index_init(uint idx, bool sorted);
+
+ int index_read_map(uchar * buf, const uchar * key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag);
+
+ /** @brief
+ Called in test_quick_select to determine if indexes should be used.
+ */
+ virtual double scan_time() { return (double) (stats.records+stats.deleted) / 20.0+10; }
+
+ /** @brief
+ This method will never be called if you do not implement indexes.
+ */
+ virtual double read_time(uint, uint, ha_rows rows)
+ { return (double) rows / 20.0+1; }
+
+ virtual void start_bulk_insert(ha_rows rows, uint flags);
+ virtual int end_bulk_insert();
+
+ virtual int reset();
+
+
+ int multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param,
+ uint n_ranges, uint mode, HANDLER_BUFFER *buf);
+ int multi_range_read_next(range_id_t *range_info);
+ ha_rows multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
+ void *seq_init_param,
+ uint n_ranges, uint *bufsz,
+ uint *flags, COST_VECT *cost);
+ ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys,
+ uint key_parts, uint *bufsz,
+ uint *flags, COST_VECT *cost);
+ int multi_range_read_explain_info(uint mrr_mode, char *str, size_t size);
+
+private:
+ bool source_exhausted;
+ bool mrr_start_read();
+ int check_field_options(Field **fields);
+ int read_dyncol(uint *count,
+ DYNAMIC_COLUMN_VALUE **vals, LEX_STRING **names,
+ String *valcol);
+ int write_dynamic_row(uint count,
+ DYNAMIC_COLUMN_VALUE *vals,
+ LEX_STRING *names);
+ void static free_dynamic_row(DYNAMIC_COLUMN_VALUE **vals,
+ LEX_STRING **names);
+ CASSANDRA_TYPE_DEF * get_cassandra_field_def(char *cass_name,
+ int cass_name_length);
+public:
+ int open(const char *name, int mode, uint test_if_locked);
+ int close(void);
+
+ int write_row(uchar *buf);
+ int update_row(const uchar *old_data, uchar *new_data);
+ int delete_row(const uchar *buf);
+
+ /** @brief
+ Unlike index_init(), rnd_init() can be called two consecutive times
+ without rnd_end() in between (it only makes sense if scan=1). In this
+ case, the second call should prepare for the new table scan (e.g. if
+ rnd_init() allocates the cursor, the second call should position the
+ cursor to the start of the table; there is no need to deallocate and
+ allocate it again). This is a required method.
+ */
+ int rnd_init(bool scan); //required
+ int rnd_end();
+ int rnd_next(uchar *buf); ///< required
+ int rnd_pos(uchar *buf, uchar *pos); ///< required
+ void position(const uchar *record); ///< required
+ int info(uint); ///< required
+ int delete_all_rows(void);
+ ha_rows records_in_range(uint inx, key_range *min_key,
+ key_range *max_key);
+ int create(const char *name, TABLE *form,
+ HA_CREATE_INFO *create_info); ///< required
+ bool check_if_incompatible_data(HA_CREATE_INFO *info,
+ uint table_changes);
+
+ THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type); ///< required
+};
diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc
index d31e5ee8d89..50e639881c5 100644
--- a/storage/csv/ha_tina.cc
+++ b/storage/csv/ha_tina.cc
@@ -497,7 +497,7 @@ ha_tina::ha_tina(handlerton *hton, TABLE_SHARE *table_arg)
buffer.set((char*)byte_buffer, IO_SIZE, &my_charset_bin);
chain= chain_buffer;
file_buff= new Transparent_file();
- init_alloc_root(&blobroot, BLOB_MEMROOT_ALLOC_SIZE, 0);;
+ init_alloc_root(&blobroot, BLOB_MEMROOT_ALLOC_SIZE, 0, MYF(0));
}
@@ -961,7 +961,7 @@ int ha_tina::open(const char *name, int mode, uint open_options)
*/
thr_lock_data_init(&share->lock, &lock, (void*) this);
ref_length= sizeof(my_off_t);
- init_alloc_root(&blobroot, BLOB_MEMROOT_ALLOC_SIZE, 0);
+ init_alloc_root(&blobroot, BLOB_MEMROOT_ALLOC_SIZE, 0, MYF(0));
share->lock.get_status= tina_get_status;
share->lock.update_status= tina_update_status;
@@ -999,9 +999,6 @@ int ha_tina::write_row(uchar * buf)
ha_statistic_increment(&SSV::ha_write_count);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
-
size= encode_quote(buf);
if (!share->tina_write_opened)
@@ -1064,9 +1061,6 @@ int ha_tina::update_row(const uchar * old_data, uchar * new_data)
ha_statistic_increment(&SSV::ha_update_count);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- table->timestamp_field->set_time();
-
size= encode_quote(new_data);
/*
diff --git a/storage/csv/ha_tina.h b/storage/csv/ha_tina.h
index 88af2c9652c..26404b3a9e7 100644
--- a/storage/csv/ha_tina.h
+++ b/storage/csv/ha_tina.h
@@ -102,7 +102,6 @@ public:
delete file_buff;
free_root(&blobroot, MYF(0));
}
- const char *table_type() const { return "CSV"; }
const char *index_type(uint inx) { return "NONE"; }
const char **bas_ext() const;
ulonglong table_flags() const
diff --git a/storage/example/ha_example.cc b/storage/example/ha_example.cc
index 2c65fd657a9..81f6cbe707f 100644
--- a/storage/example/ha_example.cc
+++ b/storage/example/ha_example.cc
@@ -1097,7 +1097,8 @@ static struct st_mysql_sys_var* example_system_variables[]= {
NULL
};
-// this is an example of SHOW_FUNC and of my_snprintf() service
+// This is an example of SHOW_SIMPLE_FUNC and of the my_snprintf() service.
+// If this function were to return an array, one should use SHOW_FUNC.
static int show_func_example(MYSQL_THD thd, struct st_mysql_show_var *var,
char *buf)
{
@@ -1111,7 +1112,7 @@ static int show_func_example(MYSQL_THD thd, struct st_mysql_show_var *var,
static struct st_mysql_show_var func_status[]=
{
- {"example_func_example", (char *)show_func_example, SHOW_FUNC},
+ {"example_func_example", (char *)show_func_example, SHOW_SIMPLE_FUNC},
{0,0,SHOW_UNDEF}
};
diff --git a/storage/example/ha_example.h b/storage/example/ha_example.h
index ca8ca5ff293..9be370edfe3 100644
--- a/storage/example/ha_example.h
+++ b/storage/example/ha_example.h
@@ -67,11 +67,6 @@ public:
}
/** @brief
- The name that will be used for display purposes.
- */
- const char *table_type() const { return "EXAMPLE"; }
-
- /** @brief
The name of the index type that will be used for display.
Don't implement this method unless you really have indexes.
*/
diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc
index c0bf0acb523..b554c344f62 100644
--- a/storage/federated/ha_federated.cc
+++ b/storage/federated/ha_federated.cc
@@ -1515,7 +1515,7 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table)
*/
query.length(0);
- init_alloc_root(&mem_root, 256, 0);
+ init_alloc_root(&mem_root, 256, 0, MYF(0));
mysql_mutex_lock(&federated_mutex);
@@ -1656,7 +1656,7 @@ int ha_federated::open(const char *name, int mode, uint test_if_locked)
ref_length= sizeof(MYSQL_RES *) + sizeof(MYSQL_ROW_OFFSET);
DBUG_PRINT("info", ("ref_length: %u", ref_length));
- my_init_dynamic_array(&results, sizeof(MYSQL_RES *), 4, 4);
+ my_init_dynamic_array(&results, sizeof(MYSQL_RES *), 4, 4, MYF(0));
reset();
DBUG_RETURN(0);
@@ -1849,8 +1849,6 @@ int ha_federated::write_row(uchar *buf)
values_string.length(0);
insert_field_value_string.length(0);
ha_statistic_increment(&SSV::ha_write_count);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
/*
start both our field and field values strings
@@ -1981,7 +1979,7 @@ int ha_federated::write_row(uchar *buf)
@details Initializes memory structures required for bulk insert.
*/
-void ha_federated::start_bulk_insert(ha_rows rows)
+void ha_federated::start_bulk_insert(ha_rows rows, uint flags)
{
uint page_size;
DBUG_ENTER("ha_federated::start_bulk_insert");
@@ -3167,7 +3165,6 @@ int ha_federated::real_connect()
this->table->s->table_charset->csname);
sql_query.length(0);
-
if (!mysql_real_connect(mysql,
share->hostname,
share->username,
diff --git a/storage/federated/ha_federated.h b/storage/federated/ha_federated.h
index 98b0efdbe2b..fc2c4740cc0 100644
--- a/storage/federated/ha_federated.h
+++ b/storage/federated/ha_federated.h
@@ -124,8 +124,6 @@ private:
public:
ha_federated(handlerton *hton, TABLE_SHARE *table_arg);
~ha_federated() {}
- /* The name that will be used for display purposes */
- const char *table_type() const { return "FEDERATED"; }
/*
Next pointer used in transaction
*/
@@ -211,7 +209,7 @@ public:
int open(const char *name, int mode, uint test_if_locked); // required
int close(void); // required
- void start_bulk_insert(ha_rows rows);
+ void start_bulk_insert(ha_rows rows, uint flags);
int end_bulk_insert();
int write_row(uchar *buf);
int update_row(const uchar *old_data, uchar *new_data);
diff --git a/storage/federatedx/federatedx_io_mysql.cc b/storage/federatedx/federatedx_io_mysql.cc
index a2ba496ea47..effbe899e7f 100644
--- a/storage/federatedx/federatedx_io_mysql.cc
+++ b/storage/federatedx/federatedx_io_mysql.cc
@@ -136,7 +136,7 @@ federatedx_io_mysql::federatedx_io_mysql(FEDERATEDX_SERVER *aserver)
bzero(&mysql, sizeof(MYSQL));
bzero(&savepoints, sizeof(DYNAMIC_ARRAY));
- my_init_dynamic_array(&savepoints, sizeof(SAVEPT), 16, 16);
+ my_init_dynamic_array(&savepoints, sizeof(SAVEPT), 16, 16, MYF(0));
DBUG_VOID_RETURN;
}
diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc
index a10285a81aa..e1c2a38964a 100644
--- a/storage/federatedx/ha_federatedx.cc
+++ b/storage/federatedx/ha_federatedx.cc
@@ -1519,7 +1519,7 @@ static FEDERATEDX_SERVER *get_server(FEDERATEDX_SHARE *share, TABLE *table)
mysql_mutex_assert_owner(&federatedx_mutex);
- init_alloc_root(&mem_root, 4096, 4096);
+ init_alloc_root(&mem_root, 4096, 4096, MYF(0));
fill_server(&mem_root, &tmp_server, share, table ? table->s->table_charset : 0);
@@ -1577,7 +1577,7 @@ static FEDERATEDX_SHARE *get_share(const char *table_name, TABLE *table)
query.length(0);
bzero(&tmp_share, sizeof(tmp_share));
- init_alloc_root(&mem_root, 256, 0);
+ init_alloc_root(&mem_root, 256, 0, MYF(0));
mysql_mutex_lock(&federatedx_mutex);
@@ -1791,7 +1791,7 @@ int ha_federatedx::open(const char *name, int mode, uint test_if_locked)
DBUG_PRINT("info", ("ref_length: %u", ref_length));
- my_init_dynamic_array(&results, sizeof(FEDERATEDX_IO_RESULT*), 4, 4);
+ my_init_dynamic_array(&results, sizeof(FEDERATEDX_IO_RESULT*), 4, 4, MYF(0));
reset();
@@ -1993,8 +1993,6 @@ int ha_federatedx::write_row(uchar *buf)
values_string.length(0);
insert_field_value_string.length(0);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
/*
start both our field and field values strings
@@ -2128,7 +2126,7 @@ int ha_federatedx::write_row(uchar *buf)
@details Initializes memory structures required for bulk insert.
*/
-void ha_federatedx::start_bulk_insert(ha_rows rows)
+void ha_federatedx::start_bulk_insert(ha_rows rows, uint flags)
{
uint page_size;
DBUG_ENTER("ha_federatedx::start_bulk_insert");
diff --git a/storage/federatedx/ha_federatedx.h b/storage/federatedx/ha_federatedx.h
index dc7733806ad..dcbbe534262 100644
--- a/storage/federatedx/ha_federatedx.h
+++ b/storage/federatedx/ha_federatedx.h
@@ -311,8 +311,6 @@ private:
public:
ha_federatedx(handlerton *hton, TABLE_SHARE *table_arg);
~ha_federatedx() {}
- /* The name that will be used for display purposes */
- const char *table_type() const { return "FEDERATED"; }
/*
The name of the index type that will be used for display
don't implement this method unless you really have indexes
@@ -392,7 +390,7 @@ public:
int open(const char *name, int mode, uint test_if_locked); // required
int close(void); // required
- void start_bulk_insert(ha_rows rows);
+ void start_bulk_insert(ha_rows rows, uint flags);
int end_bulk_insert();
int write_row(uchar *buf);
int update_row(const uchar *old_data, uchar *new_data);
diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc
index 305a8cae720..8e63799680b 100644
--- a/storage/heap/ha_heap.cc
+++ b/storage/heap/ha_heap.cc
@@ -241,8 +241,6 @@ void ha_heap::update_key_stats()
int ha_heap::write_row(uchar * buf)
{
int res;
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
if (table->next_number_field && buf == table->record[0])
{
if ((res= update_auto_increment()))
@@ -264,8 +262,6 @@ int ha_heap::write_row(uchar * buf)
int ha_heap::update_row(const uchar * old_data, uchar * new_data)
{
int res;
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- table->timestamp_field->set_time();
res= heap_update(file,old_data,new_data);
if (!res && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD >
file->s->records)
@@ -638,7 +634,7 @@ heap_prepare_hp_create_info(TABLE *table_arg, bool internal_table,
if (!(keydef= (HP_KEYDEF*) my_malloc(keys * sizeof(HP_KEYDEF) +
parts * sizeof(HA_KEYSEG),
- MYF(MY_WME))))
+ MYF(MY_WME | MY_THREAD_SPECIFIC))))
return my_errno;
seg= reinterpret_cast<HA_KEYSEG*>(keydef + keys);
for (key= 0; key < keys; key++)
diff --git a/storage/heap/ha_heap.h b/storage/heap/ha_heap.h
index c5b8a09b216..30ad06e2c06 100644
--- a/storage/heap/ha_heap.h
+++ b/storage/heap/ha_heap.h
@@ -38,11 +38,6 @@ public:
ha_heap(handlerton *hton, TABLE_SHARE *table);
~ha_heap() {}
handler *clone(const char *name, MEM_ROOT *mem_root);
- const char *table_type() const
- {
- return (table->in_use->variables.sql_mode & MODE_MYSQL323) ?
- "HEAP" : "MEMORY";
- }
const char *index_type(uint inx)
{
return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ?
diff --git a/storage/heap/heapdef.h b/storage/heap/heapdef.h
index db00ea87a01..d5c0ad96b05 100644
--- a/storage/heap/heapdef.h
+++ b/storage/heap/heapdef.h
@@ -62,7 +62,8 @@ typedef struct {
extern HP_SHARE *hp_find_named_heap(const char *name);
extern int hp_rectest(HP_INFO *info,const uchar *old);
extern uchar *hp_find_block(HP_BLOCK *info,ulong pos);
-extern int hp_get_new_block(HP_BLOCK *info, size_t* alloc_length);
+extern int hp_get_new_block(HP_SHARE *info, HP_BLOCK *block,
+ size_t* alloc_length);
extern void hp_free(HP_SHARE *info);
extern uchar *hp_free_level(HP_BLOCK *block,uint level,HP_PTRS *pos,
uchar *last_pos);
diff --git a/storage/heap/hp_block.c b/storage/heap/hp_block.c
index 01978e2b4e8..1c40f982422 100644
--- a/storage/heap/hp_block.c
+++ b/storage/heap/hp_block.c
@@ -45,6 +45,7 @@ uchar *hp_find_block(HP_BLOCK *block, ulong pos)
SYNOPSIS
hp_get_new_block()
+ info heap handle
block HP_BLOCK tree-like block
alloc_length OUT Amount of memory allocated from the heap
@@ -54,7 +55,7 @@ uchar *hp_find_block(HP_BLOCK *block, ulong pos)
1 Out of memory
*/
-int hp_get_new_block(HP_BLOCK *block, size_t *alloc_length)
+int hp_get_new_block(HP_SHARE *info, HP_BLOCK *block, size_t *alloc_length)
{
reg1 uint i,j;
HP_PTRS *root;
@@ -77,7 +78,10 @@ int hp_get_new_block(HP_BLOCK *block, size_t *alloc_length)
*/
*alloc_length= (sizeof(HP_PTRS)* ((i == block->levels) ? i : i - 1) +
block->records_in_block* block->recbuffer);
- if (!(root=(HP_PTRS*) my_malloc(*alloc_length,MYF(MY_WME))))
+ if (!(root=(HP_PTRS*) my_malloc(*alloc_length,
+ MYF(MY_WME |
+ (info->internal ?
+ MY_THREAD_SPECIFIC : 0)))))
return 1;
if (i == 0)
diff --git a/storage/heap/hp_create.c b/storage/heap/hp_create.c
index d170d1abc65..a8bc8e63810 100644
--- a/storage/heap/hp_create.c
+++ b/storage/heap/hp_create.c
@@ -146,7 +146,9 @@ int heap_create(const char *name, HP_CREATE_INFO *create_info,
if (!(share= (HP_SHARE*) my_malloc((uint) sizeof(HP_SHARE)+
keys*sizeof(HP_KEYDEF)+
key_segs*sizeof(HA_KEYSEG),
- MYF(MY_ZEROFILL))))
+ MYF(MY_ZEROFILL |
+ (create_info->internal_table ?
+ MY_THREAD_SPECIFIC : 0)))))
goto err;
share->keydef= (HP_KEYDEF*) (share + 1);
share->key_stat_version= 1;
@@ -171,7 +173,9 @@ int heap_create(const char *name, HP_CREATE_INFO *create_info,
keyseg++;
init_tree(&keyinfo->rb_tree, 0, 0, sizeof(uchar*),
- (qsort_cmp2)keys_compare, 1, NULL, NULL);
+ (qsort_cmp2)keys_compare, NULL, NULL,
+ MYF((create_info->internal_table ? MY_THREAD_SPECIFIC : 0) |
+ MY_TREE_WITH_DELETE));
keyinfo->delete_key= hp_rb_delete_key;
keyinfo->write_key= hp_rb_write_key;
}
@@ -199,6 +203,7 @@ int heap_create(const char *name, HP_CREATE_INFO *create_info,
share->auto_key_type= create_info->auto_key_type;
share->auto_increment= create_info->auto_increment;
share->create_time= (long) time((time_t*) 0);
+ share->internal= create_info->internal_table;
/* Must be allocated separately for rename to work */
if (!(share->name= my_strdup(name,MYF(0))))
{
diff --git a/storage/heap/hp_open.c b/storage/heap/hp_open.c
index c456dbdfc84..fc7397989f2 100644
--- a/storage/heap/hp_open.c
+++ b/storage/heap/hp_open.c
@@ -32,7 +32,9 @@ HP_INFO *heap_open_from_share(HP_SHARE *share, int mode)
if (!(info= (HP_INFO*) my_malloc((uint) sizeof(HP_INFO) +
2 * share->max_key_length,
- MYF(MY_ZEROFILL))))
+ MYF(MY_ZEROFILL |
+ (share->internal ?
+ MY_THREAD_SPECIFIC : 0)))))
{
DBUG_RETURN(0);
}
diff --git a/storage/heap/hp_write.c b/storage/heap/hp_write.c
index 8bfecc7747c..c84fc4b6104 100644
--- a/storage/heap/hp_write.c
+++ b/storage/heap/hp_write.c
@@ -162,7 +162,7 @@ static uchar *next_free_record_pos(HP_SHARE *info)
my_errno=HA_ERR_RECORD_FILE_FULL;
DBUG_RETURN(NULL);
}
- if (hp_get_new_block(&info->block,&length))
+ if (hp_get_new_block(info, &info->block,&length))
DBUG_RETURN(NULL);
info->data_length+=length;
}
@@ -407,7 +407,7 @@ static HASH_INFO *hp_find_free_hash(HP_SHARE *info,
return hp_find_hash(block,records);
if (!(block_pos=(records % block->records_in_block)))
{
- if (hp_get_new_block(block,&length))
+ if (hp_get_new_block(info, block, &length))
return(NULL);
info->index_length+=length;
}
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 7c036d3b601..bf7bfce6b5c 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -106,6 +106,7 @@ static ulong commit_threads = 0;
static mysql_mutex_t commit_threads_m;
static mysql_cond_t commit_cond;
static mysql_mutex_t commit_cond_m;
+static mysql_mutex_t pending_checkpoint_mutex;
static bool innodb_inited = 0;
#define INSIDE_HA_INNOBASE_CC
@@ -222,11 +223,13 @@ static mysql_pfs_key_t innobase_share_mutex_key;
static mysql_pfs_key_t commit_threads_m_key;
static mysql_pfs_key_t commit_cond_mutex_key;
static mysql_pfs_key_t commit_cond_key;
+static mysql_pfs_key_t pending_checkpoint_mutex_key;
static PSI_mutex_info all_pthread_mutexes[] = {
{&commit_threads_m_key, "commit_threads_m", 0},
{&commit_cond_mutex_key, "commit_cond_mutex", 0},
- {&innobase_share_mutex_key, "innobase_share_mutex", 0}
+ {&innobase_share_mutex_key, "innobase_share_mutex", 0},
+ {&pending_checkpoint_mutex_key, "pending_checkpoint_mutex", 0}
};
static PSI_cond_info all_innodb_conds[] = {
@@ -348,6 +351,7 @@ static int innobase_rollback_to_savepoint(handlerton *hton, THD* thd,
static int innobase_savepoint(handlerton *hton, THD* thd, void *savepoint);
static int innobase_release_savepoint(handlerton *hton, THD* thd,
void *savepoint);
+static void innobase_checkpoint_request(handlerton *hton, void *cookie);
static handler *innobase_create_handler(handlerton *hton,
TABLE_SHARE *table,
MEM_ROOT *mem_root);
@@ -2271,6 +2275,7 @@ innobase_init(
innobase_hton->recover=innobase_xa_recover;
innobase_hton->commit_by_xid=innobase_commit_by_xid;
innobase_hton->rollback_by_xid=innobase_rollback_by_xid;
+ innobase_hton->commit_checkpoint_request=innobase_checkpoint_request;
innobase_hton->create_cursor_read_view=innobase_create_cursor_view;
innobase_hton->set_cursor_read_view=innobase_set_cursor_view;
innobase_hton->close_cursor_read_view=innobase_close_cursor_view;
@@ -2612,6 +2617,9 @@ innobase_change_buffering_inited_ok:
mysql_mutex_init(commit_cond_mutex_key,
&commit_cond_m, MY_MUTEX_INIT_FAST);
mysql_cond_init(commit_cond_key, &commit_cond, NULL);
+ mysql_mutex_init(pending_checkpoint_mutex_key,
+ &pending_checkpoint_mutex,
+ MY_MUTEX_INIT_FAST);
innodb_inited= 1;
#ifdef MYSQL_DYNAMIC_PLUGIN
if (innobase_hton != p) {
@@ -2659,6 +2667,7 @@ innobase_end(
mysql_mutex_destroy(&commit_threads_m);
mysql_mutex_destroy(&commit_cond_m);
mysql_cond_destroy(&commit_cond);
+ mysql_mutex_destroy(&pending_checkpoint_mutex);
}
DBUG_RETURN(err);
@@ -3028,6 +3037,147 @@ innobase_rollback_trx(
DBUG_RETURN(convert_error_code_to_mysql(error, 0, NULL));
}
+
+struct pending_checkpoint {
+ struct pending_checkpoint *next;
+ handlerton *hton;
+ void *cookie;
+ ib_uint64_t lsn;
+};
+static struct pending_checkpoint *pending_checkpoint_list;
+static struct pending_checkpoint *pending_checkpoint_list_end;
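+
+/*
+ Requests form a simple singly-linked FIFO protected by
+ pending_checkpoint_mutex. innobase_mysql_log_notify() walks the list
+ after each log write/flush and completes every entry whose lsn has
+ been flushed to disk.
+*/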
+
+/*****************************************************************//**
+Handle a commit checkpoint request from the server layer.
+We put the request in a queue, so that we can notify the upper layer that
+the checkpoint is complete once we have flushed the redo log.
+If we have already flushed all relevant redo log, we notify immediately.*/
+static
+void
+innobase_checkpoint_request(
+ handlerton *hton,
+ void *cookie)
+{
+ ib_uint64_t lsn;
+ ib_uint64_t flush_lsn;
+ struct pending_checkpoint * entry;
+
+ /* Do the allocation outside of lock to reduce contention. The normal
+ case is that not everything is flushed, so we will need to enqueue. */
+ entry = static_cast<struct pending_checkpoint *>
+ (my_malloc(sizeof(*entry), MYF(MY_WME)));
+ if (!entry) {
+ sql_print_error("Failed to allocate %u bytes."
+ " Commit checkpoint will be skipped.",
+ static_cast<unsigned>(sizeof(*entry)));
+ return;
+ }
+
+ entry->next = NULL;
+ entry->hton = hton;
+ entry->cookie = cookie;
+
+ mysql_mutex_lock(&pending_checkpoint_mutex);
+ lsn = log_get_lsn();
+ flush_lsn = log_get_flush_lsn();
+ if (lsn > flush_lsn) {
+ /* Put the request in queue.
+ When the log gets flushed past the lsn, we will remove the
+ entry from the queue and notify the upper layer. */
+ entry->lsn = lsn;
+ if (pending_checkpoint_list_end) {
+ pending_checkpoint_list_end->next = entry;
+ /* There is no need to order the entries in the list
+ by lsn. The upper layer can accept notifications in
+ any order, and short delays in notifications do not
+ significantly impact performance. */
+ } else {
+ pending_checkpoint_list = entry;
+ }
+ pending_checkpoint_list_end = entry;
+ entry = NULL;
+ }
+ mysql_mutex_unlock(&pending_checkpoint_mutex);
+
+ if (entry) {
+ /* We are already flushed. Notify the checkpoint immediately. */
+ commit_checkpoint_notify_ha(entry->hton, entry->cookie);
+ my_free(entry);
+ }
+}
+
+/*****************************************************************//**
+The log code calls this whenever the log has been written and/or flushed up
+to a new position. We use this to notify the upper layer of a new commit
+checkpoint when necessary.*/
+extern "C" UNIV_INTERN
+void
+innobase_mysql_log_notify(
+/*===============*/
+ ib_uint64_t write_lsn, /*!< in: LSN written to log file */
+ ib_uint64_t flush_lsn) /*!< in: LSN flushed to disk */
+{
+ struct pending_checkpoint * pending;
+ struct pending_checkpoint * entry;
+ struct pending_checkpoint * last_ready;
+
+ /* It is safe to do a quick check for NULL first without lock.
+ Even if we should race, we will at most skip one checkpoint and
+ take the next one, which is harmless. */
+ if (!pending_checkpoint_list)
+ return;
+
+ mysql_mutex_lock(&pending_checkpoint_mutex);
+ pending = pending_checkpoint_list;
+ if (!pending)
+ {
+ mysql_mutex_unlock(&pending_checkpoint_mutex);
+ return;
+ }
+
+ last_ready = NULL;
+ for (entry = pending; entry != NULL; entry = entry->next)
+ {
+ /* Notify checkpoints up until the first entry that has not
+ been fully flushed to the redo log. Since we do not maintain
+ the list ordered, in principle there could be more entries
+ later in the list that were also flushed. But there is no harm in
+ delaying notifications for those a bit. And in practice, the
+ list is unlikely to have more than one element anyway, as we
+ flush the redo log at least once every second. */
+ if (entry->lsn > flush_lsn)
+ break;
+ last_ready = entry;
+ }
+
+ if (last_ready)
+ {
+ /* We found some pending checkpoints that are now flushed to
+ disk. So remove them from the list. */
+ pending_checkpoint_list = entry;
+ if (!entry)
+ pending_checkpoint_list_end = NULL;
+ }
+
+ mysql_mutex_unlock(&pending_checkpoint_mutex);
+
+ if (!last_ready)
+ return;
+
+ /* Now that we have released the lock, notify upper layer about all
+ commit checkpoints that have now completed. */
+ for (;;) {
+ entry = pending;
+ pending = pending->next;
+
+ commit_checkpoint_notify_ha(entry->hton, entry->cookie);
+
+ my_free(entry);
+ if (entry == last_ready)
+ break;
+ }
+}
+
/*****************************************************************//**
Rolls back a transaction to a savepoint.
@return 0 if success, HA_ERR_NO_SAVEPOINT if no savepoint with the
@@ -5190,9 +5340,6 @@ ha_innobase::write_row(
ha_statistic_increment(&SSV::ha_write_count);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
-
sql_command = thd_sql_command(user_thd);
if ((sql_command == SQLCOM_ALTER_TABLE
@@ -5600,9 +5747,6 @@ ha_innobase::update_row(
ha_statistic_increment(&SSV::ha_update_count);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- table->timestamp_field->set_time();
-
if (prebuilt->upd_node) {
uvect = prebuilt->upd_node->update;
} else {
@@ -6925,7 +7069,7 @@ get_row_format_name(
if (!srv_file_per_table) { \
push_warning_printf( \
thd, MYSQL_ERROR::WARN_LEVEL_WARN, \
- ER_ILLEGAL_HA_CREATE_OPTION, \
+ HA_WRONG_CREATE_OPTION, \
"InnoDB: ROW_FORMAT=%s requires" \
" innodb_file_per_table.", \
get_row_format_name(row_format)); \
@@ -6937,7 +7081,7 @@ get_row_format_name(
if (srv_file_format < DICT_TF_FORMAT_ZIP) { \
push_warning_printf( \
thd, MYSQL_ERROR::WARN_LEVEL_WARN, \
- ER_ILLEGAL_HA_CREATE_OPTION, \
+ HA_WRONG_CREATE_OPTION, \
"InnoDB: ROW_FORMAT=%s requires" \
" innodb_file_format > Antelope.", \
get_row_format_name(row_format)); \
@@ -6987,7 +7131,7 @@ create_options_are_valid(
if (!srv_file_per_table) {
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_per_table.");
ret = FALSE;
@@ -6995,7 +7139,7 @@ create_options_are_valid(
if (srv_file_format < DICT_TF_FORMAT_ZIP) {
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_format > Antelope.");
ret = FALSE;
@@ -7004,7 +7148,7 @@ create_options_are_valid(
default:
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: invalid KEY_BLOCK_SIZE = %lu."
" Valid values are [1, 2, 4, 8, 16]",
create_info->key_block_size);
@@ -7029,7 +7173,7 @@ create_options_are_valid(
if (kbs_specified) {
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: cannot specify ROW_FORMAT = %s"
" with KEY_BLOCK_SIZE.",
get_row_format_name(row_format));
@@ -7043,7 +7187,7 @@ create_options_are_valid(
case ROW_TYPE_NOT_USED:
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION, \
+ HA_WRONG_CREATE_OPTION, \
"InnoDB: invalid ROW_FORMAT specifier.");
ret = FALSE;
break;
@@ -7146,7 +7290,7 @@ ha_innobase::create(
/* Validate create options if innodb_strict_mode is set. */
if (!create_options_are_valid(thd, form, create_info)) {
- DBUG_RETURN(ER_ILLEGAL_HA_CREATE_OPTION);
+ DBUG_RETURN(HA_WRONG_CREATE_OPTION);
}
if (create_info->key_block_size) {
@@ -7172,7 +7316,7 @@ ha_innobase::create(
if (!srv_file_per_table) {
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_per_table.");
flags = 0;
@@ -7181,7 +7325,7 @@ ha_innobase::create(
if (file_format < DICT_TF_FORMAT_ZIP) {
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_format > Antelope.");
flags = 0;
@@ -7190,7 +7334,7 @@ ha_innobase::create(
if (!flags) {
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: ignoring KEY_BLOCK_SIZE=%lu.",
create_info->key_block_size);
}
@@ -7212,7 +7356,7 @@ ha_innobase::create(
with ALTER TABLE anyway. */
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: ignoring KEY_BLOCK_SIZE=%lu"
" unless ROW_FORMAT=COMPRESSED.",
create_info->key_block_size);
@@ -7243,14 +7387,14 @@ ha_innobase::create(
if (!srv_file_per_table) {
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: ROW_FORMAT=%s requires"
" innodb_file_per_table.",
get_row_format_name(row_format));
} else if (file_format < DICT_TF_FORMAT_ZIP) {
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: ROW_FORMAT=%s requires"
" innodb_file_format > Antelope.",
get_row_format_name(row_format));
@@ -7267,7 +7411,7 @@ ha_innobase::create(
case ROW_TYPE_PAGE:
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: assuming ROW_FORMAT=COMPACT.");
case ROW_TYPE_DEFAULT:
case ROW_TYPE_COMPACT:
@@ -11518,10 +11662,17 @@ static MYSQL_SYSVAR_STR(file_format_max, innobase_file_format_max,
static MYSQL_SYSVAR_ULONG(flush_log_at_trx_commit, srv_flush_log_at_trx_commit,
PLUGIN_VAR_OPCMDARG,
- "Set to 0 (write and flush once per second),"
- " 1 (write and flush at each commit)"
- " or 2 (write at commit, flush once per second).",
- NULL, NULL, 1, 0, 2, 0);
+ "Controls the durability/speed trade-off for commits."
+ " Set to 0 (write and flush redo log to disk only once per second),"
+ " 1 (flush to disk at each commit),"
+ " 2 (write to log at commit but flush to disk only once per second)"
+ " or 3 (flush to disk at prepare and at commit, slower and usually redundant)."
+ " 1 and 3 guarantees that after a crash, committed transactions will"
+ " not be lost and will be consistent with the binlog and other transactional"
+ " engines. 2 can get inconsistent and lose transactions if there is a"
+ " power failure or kernel crash but not if mysqld crashes. 0 has no"
+ " guarantees in case of crash. 0 and 2 can be faster than 1 or 3.",
+ NULL, NULL, 1, 0, 3, 0);
static MYSQL_SYSVAR_STR(flush_method, innobase_file_flush_method,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h
index edf7a1a28c1..a0ec09c892f 100644
--- a/storage/innobase/include/ha_prototypes.h
+++ b/storage/innobase/include/ha_prototypes.h
@@ -136,6 +136,17 @@ innobase_mysql_print_thd(
uint max_query_len); /*!< in: max query length to print, or 0 to
use the default max length */
+/*****************************************************************//**
+The log code calls this whenever the log has been written and/or flushed up
+to a new position. We use this to notify the upper layer of a new commit
+checkpoint when necessary.*/
+UNIV_INTERN
+void
+innobase_mysql_log_notify(
+/*===============*/
+ ib_uint64_t write_lsn, /*!< in: LSN written to log file */
+ ib_uint64_t flush_lsn); /*!< in: LSN flushed to disk */
+
/**************************************************************//**
Converts a MySQL type to an InnoDB type. Note that this function returns
the 'mtype' of InnoDB. InnoDB differentiates between MySQL's old <= 4.1
diff --git a/storage/innobase/include/log0log.h b/storage/innobase/include/log0log.h
index 5524272d811..f280deeee77 100644
--- a/storage/innobase/include/log0log.h
+++ b/storage/innobase/include/log0log.h
@@ -154,6 +154,13 @@ UNIV_INLINE
ib_uint64_t
log_get_lsn(void);
/*=============*/
+/************************************************************//**
+Gets the last lsn that is fully flushed to disk.
+@return last flushed lsn */
+UNIV_INLINE
+ib_uint64_t
+log_get_flush_lsn(void);
+/*=============*/
/****************************************************************
Gets the log group capacity. It is OK to read the value without
holding log_sys->mutex because it is constant.
diff --git a/storage/innobase/include/log0log.ic b/storage/innobase/include/log0log.ic
index 67db6695cab..b54697637b0 100644
--- a/storage/innobase/include/log0log.ic
+++ b/storage/innobase/include/log0log.ic
@@ -411,6 +411,25 @@ log_get_lsn(void)
return(lsn);
}
+/************************************************************//**
+Gets the last lsn that is fully flushed to disk.
+@return last flushed lsn */
+UNIV_INLINE
+ib_uint64_t
+log_get_flush_lsn(void)
+/*=============*/
+{
+ ib_uint64_t lsn;
+
+ mutex_enter(&(log_sys->mutex));
+
+ lsn = log_sys->flushed_to_disk_lsn;
+
+ mutex_exit(&(log_sys->mutex));
+
+ return(lsn);
+}
+
/****************************************************************
Gets the log group capacity. It is OK to read the value without
holding log_sys->mutex because it is constant.
diff --git a/storage/innobase/log/log0log.c b/storage/innobase/log/log0log.c
index 8bae95f0a5d..412fab86858 100644
--- a/storage/innobase/log/log0log.c
+++ b/storage/innobase/log/log0log.c
@@ -48,6 +48,7 @@ Created 12/9/1995 Heikki Tuuri
#include "srv0start.h"
#include "trx0sys.h"
#include "trx0trx.h"
+#include "ha_prototypes.h"
/*
General philosophy of InnoDB redo-logs:
@@ -1353,6 +1354,8 @@ log_write_up_to(
ulint loop_count = 0;
#endif /* UNIV_DEBUG */
ulint unlock;
+ ib_uint64_t write_lsn;
+ ib_uint64_t flush_lsn;
if (recv_no_ibuf_operations) {
/* Recovery is running and no operations on the log files are
@@ -1530,8 +1533,13 @@ loop:
log_flush_do_unlocks(unlock);
+ write_lsn = log_sys->write_lsn;
+ flush_lsn = log_sys->flushed_to_disk_lsn;
+
mutex_exit(&(log_sys->mutex));
+ innobase_mysql_log_notify(write_lsn, flush_lsn);
+
return;
do_waits:
diff --git a/storage/innobase/mysql-test/storage_engine/alter_tablespace.rdiff b/storage/innobase/mysql-test/storage_engine/alter_tablespace.rdiff
new file mode 100644
index 00000000000..0cbe1fa48ae
--- /dev/null
+++ b/storage/innobase/mysql-test/storage_engine/alter_tablespace.rdiff
@@ -0,0 +1,11 @@
+--- suite/storage_engine/alter_tablespace.result 2013-01-13 01:03:49.133994000 +0400
++++ suite/storage_engine/alter_tablespace.reject 2013-01-13 01:04:04.398937286 +0400
+@@ -10,7 +10,7 @@
+ 2
+ ALTER TABLE t1 DISCARD TABLESPACE;
+ SELECT * FROM t1;
+-ERROR HY000: Got error -1 from storage engine
++ERROR HY000: Got error -1 "Internal error < 0 (Not system error)" from storage engine
+ ALTER TABLE t1 IMPORT TABLESPACE;
+ SELECT * FROM t1;
+ a
diff --git a/storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff b/storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff
index 4c0e0c375f5..8bf84115a52 100644
--- a/storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff
+++ b/storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff
@@ -4,7 +4,7 @@
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (a <INT_COLUMN>, b <CHAR_COLUMN>) ENGINE=<STORAGE_ENGINE> <CUSTOM_TABLE_OPTIONS> ROW_FORMAT=FIXED;
+Warnings:
-+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
++Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/storage/innobase/trx/trx0trx.c b/storage/innobase/trx/trx0trx.c
index 85246aa6d1f..78191c4c1b4 100644
--- a/storage/innobase/trx/trx0trx.c
+++ b/storage/innobase/trx/trx0trx.c
@@ -1025,7 +1025,8 @@ trx_commit_off_kernel(
trx->must_flush_log_later = TRUE;
} else if (srv_flush_log_at_trx_commit == 0) {
/* Do nothing */
- } else if (srv_flush_log_at_trx_commit == 1) {
+ } else if (srv_flush_log_at_trx_commit == 1 ||
+ srv_flush_log_at_trx_commit == 3) {
if (srv_unix_file_flush_method == SRV_UNIX_NOSYNC) {
/* Write the log but do not flush it to disk */
@@ -1712,7 +1713,11 @@ trx_commit_complete_for_mysql(
/* Do nothing */
} else if (srv_flush_log_at_trx_commit == 0) {
/* Do nothing */
- } else if (srv_flush_log_at_trx_commit == 1) {
+ } else if (srv_flush_log_at_trx_commit == 1 && trx->active_commit_ordered) {
+ /* Do nothing - we already flushed the prepare and binlog write
+ to disk, so transaction is durable (will be recovered from
+ binlog if necessary) */
+ } else if (srv_flush_log_at_trx_commit == 1 || srv_flush_log_at_trx_commit == 3) {
if (srv_unix_file_flush_method == SRV_UNIX_NOSYNC) {
/* Write the log but do not flush it to disk */
@@ -1992,7 +1997,7 @@ trx_prepare_off_kernel(
if (srv_flush_log_at_trx_commit == 0) {
/* Do nothing */
- } else if (srv_flush_log_at_trx_commit == 1) {
+ } else if (srv_flush_log_at_trx_commit == 1 || srv_flush_log_at_trx_commit == 3) {
if (srv_unix_file_flush_method == SRV_UNIX_NOSYNC) {
/* Write the log but do not flush it to disk */
diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc
index 31b903871ce..6264a342d7a 100644
--- a/storage/maria/ha_maria.cc
+++ b/storage/maria/ha_maria.cc
@@ -398,6 +398,9 @@ static void _ma_check_print_msg(HA_CHECK *param, const char *msg_type,
char msgbuf[MYSQL_ERRMSG_SIZE];
char name[NAME_LEN * 2 + 2];
+ if (param->testflag & T_SUPPRESS_ERR_HANDLING)
+ return;
+
msg_length= my_vsnprintf(msgbuf, sizeof(msgbuf), fmt, args);
msgbuf[sizeof(msgbuf) - 1]= 0; // healthy paranoia
@@ -851,6 +854,8 @@ void _ma_check_print_error(HA_CHECK *param, const char *fmt, ...)
DBUG_ENTER("_ma_check_print_error");
param->error_printed |= 1;
param->out_flag |= O_DATA_LOST;
+ if (param->testflag & T_SUPPRESS_ERR_HANDLING)
+ DBUG_VOID_RETURN;
va_start(args, fmt);
_ma_check_print_msg(param, "error", fmt, args);
va_end(args);
@@ -1248,10 +1253,6 @@ int ha_maria::close(void)
int ha_maria::write_row(uchar * buf)
{
- /* If we have a timestamp column, update it to the current time */
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
-
/*
If we have an auto_increment column and we are writing a changed row
or a new row, then update the auto_increment value in the record.
@@ -1584,6 +1585,7 @@ int ha_maria::repair(THD *thd, HA_CHECK *param, bool do_optimize)
param->thd= thd;
param->tmpdir= &mysql_tmpdir_list;
param->out_flag= 0;
+ share->state.dupp_key= MI_MAX_KEY;
strmov(fixed_name, share->open_file_name.str);
// Don't lock tables if we have used LOCK TABLE
@@ -1634,6 +1636,10 @@ int ha_maria::repair(THD *thd, HA_CHECK *param, bool do_optimize)
error= maria_repair_by_sort(param, file, fixed_name,
test(param->testflag & T_QUICK));
}
+ if (error && file->create_unique_index_by_sort &&
+ share->state.dupp_key != MAX_KEY)
+ print_keydup_error(share->state.dupp_key,
+ ER(ER_DUP_ENTRY_WITH_KEY_NAME), MYF(0));
}
else
{
@@ -1942,6 +1948,8 @@ int ha_maria::enable_indexes(uint mode)
param.op_name= "recreating_index";
param.testflag= (T_SILENT | T_REP_BY_SORT | T_QUICK |
T_CREATE_MISSING_KEYS | T_SAFE_REPAIR);
+ if (file->create_unique_index_by_sort)
+ param.testflag|= T_CREATE_UNIQUE_BY_SORT;
if (bulk_insert_single_undo == BULK_INSERT_SINGLE_UNDO_AND_NO_REPAIR)
{
bulk_insert_single_undo= BULK_INSERT_SINGLE_UNDO_AND_REPAIR;
@@ -2022,15 +2030,16 @@ int ha_maria::indexes_are_disabled(void)
activate special bulk-insert optimizations
SYNOPSIS
- start_bulk_insert(rows)
- rows Rows to be inserted
+ start_bulk_insert(rows, flags)
+ rows Rows to be inserted
0 if we don't know
+ flags Flags to control index creation
NOTICE
Do not forget to call end_bulk_insert() later!
*/
-void ha_maria::start_bulk_insert(ha_rows rows)
+void ha_maria::start_bulk_insert(ha_rows rows, uint flags)
{
DBUG_ENTER("ha_maria::start_bulk_insert");
THD *thd= table->in_use;
@@ -2096,7 +2105,10 @@ void ha_maria::start_bulk_insert(ha_rows rows)
maria_clear_all_keys_active(file->s->state.key_map);
}
else
- maria_disable_non_unique_index(file, rows);
+ {
+ my_bool all_keys= test(flags & HA_CREATE_UNIQUE_INDEX_BY_SORT);
+ maria_disable_indexes_for_rebuild(file, rows, all_keys);
+ }
if (share->now_transactional)
{
bulk_insert_single_undo= BULK_INSERT_SINGLE_UNDO_AND_NO_REPAIR;
@@ -2245,8 +2257,6 @@ bool ha_maria::is_crashed() const
int ha_maria::update_row(const uchar * old_data, uchar * new_data)
{
CHECK_UNTIL_WE_FULLY_IMPLEMENTED_VERSIONING("UPDATE in WRITE CONCURRENT");
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- table->timestamp_field->set_time();
return maria_update(file, old_data, new_data);
}
diff --git a/storage/maria/ha_maria.h b/storage/maria/ha_maria.h
index 545daca12fe..36a6d36a45c 100644
--- a/storage/maria/ha_maria.h
+++ b/storage/maria/ha_maria.h
@@ -59,8 +59,6 @@ public:
ha_maria(handlerton *hton, TABLE_SHARE * table_arg);
~ha_maria() {}
handler *clone(const char *name, MEM_ROOT *mem_root);
- const char *table_type() const
- { return "Aria"; }
const char *index_type(uint key_number);
const char **bas_ext() const;
ulonglong table_flags() const
@@ -127,7 +125,7 @@ public:
int disable_indexes(uint mode);
int enable_indexes(uint mode);
int indexes_are_disabled(void);
- void start_bulk_insert(ha_rows rows);
+ void start_bulk_insert(ha_rows rows, uint flags);
int end_bulk_insert();
ha_rows records_in_range(uint inx, key_range * min_key, key_range * max_key);
void update_create_info(HA_CREATE_INFO * create_info);
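start_bulk_insert() now carries a flags word through every handler. Sketch of an
engine-side override consuming the one flag this patch defines (ha_example and its
helper are placeholders, patterned on the ha_maria.cc hunk above):

    void ha_example::start_bulk_insert(ha_rows rows, uint flags)
    {
      /* with HA_CREATE_UNIQUE_INDEX_BY_SORT, unique keys may also be
         disabled and rebuilt by sort; a duplicate then fails the bulk
         load instead of being caught row by row */
      my_bool all_keys= test(flags & HA_CREATE_UNIQUE_INDEX_BY_SORT);
      example_disable_indexes_for_rebuild(file, rows, all_keys);
    }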
diff --git a/storage/maria/ma_bitmap.c b/storage/maria/ma_bitmap.c
index f5d32d6a191..e3668d3c8d3 100644
--- a/storage/maria/ma_bitmap.c
+++ b/storage/maria/ma_bitmap.c
@@ -239,7 +239,7 @@ my_bool _ma_bitmap_init(MARIA_SHARE *share, File file,
if (((bitmap->map= (uchar*) my_malloc(size, MYF(MY_WME))) == NULL) ||
my_init_dynamic_array(&bitmap->pinned_pages,
- sizeof(MARIA_PINNED_PAGE), 1, 1))
+ sizeof(MARIA_PINNED_PAGE), 1, 1, MYF(0)))
return 1;
bitmap->block_size= share->block_size;
diff --git a/storage/maria/ma_blockrec.c b/storage/maria/ma_blockrec.c
index 71faa11fd2b..55b9a137050 100644
--- a/storage/maria/ma_blockrec.c
+++ b/storage/maria/ma_blockrec.c
@@ -528,7 +528,7 @@ my_bool _ma_init_block_record(MARIA_HA *info)
if (my_init_dynamic_array(&info->bitmap_blocks,
sizeof(MARIA_BITMAP_BLOCK), default_extents,
- 64))
+ 64, MYF(0)))
goto err;
info->cur_row.extents_buffer_length= default_extents * ROW_EXTENT_SIZE;
if (!(info->cur_row.extents= my_malloc(info->cur_row.extents_buffer_length,
diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c
index 8e68f5a7898..ab9080c40fb 100644
--- a/storage/maria/ma_check.c
+++ b/storage/maria/ma_check.c
@@ -3765,7 +3765,8 @@ int maria_repair_by_sort(HA_CHECK *param, register MARIA_HA *info,
param->read_cache.end_of_file= sort_info.filelength;
sort_param.wordlist=NULL;
- init_alloc_root(&sort_param.wordroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0);
+ init_alloc_root(&sort_param.wordroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0,
+ MYF(param->malloc_flags));
sort_param.key_cmp=sort_key_cmp;
sort_param.lock_in_memory=maria_lock_memory;
@@ -3879,7 +3880,10 @@ int maria_repair_by_sort(HA_CHECK *param, register MARIA_HA *info,
(my_bool) (!(param->testflag & T_VERBOSE)),
(size_t) param->sort_buffer_length))
{
- param->retry_repair=1;
+ if ((param->testflag & T_CREATE_UNIQUE_BY_SORT) && sort_param.sort_info->dupp)
+ share->state.dupp_key= sort_param.key;
+ else
+ param->retry_repair= 1;
_ma_check_print_error(param, "Create index by sort failed");
goto err;
}
@@ -4399,7 +4403,8 @@ int maria_repair_parallel(HA_CHECK *param, register MARIA_HA *info,
(FT_MAX_WORD_LEN_FOR_SORT *
sort_param[i].keyinfo->seg->charset->mbmaxlen);
sort_param[i].key_length+=ft_max_word_len_for_sort-HA_FT_MAXBYTELEN;
- init_alloc_root(&sort_param[i].wordroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0);
+ init_alloc_root(&sort_param[i].wordroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0,
+ MYF(param->malloc_flags));
}
}
sort_info.total_keys=i;
@@ -5498,6 +5503,9 @@ static int sort_key_write(MARIA_SORT_PARAM *sort_param, const uchar *a)
sort_info->dupp++;
sort_info->info->cur_row.lastpos= get_record_for_key(sort_param->keyinfo,
a);
+ if ((param->testflag & (T_CREATE_UNIQUE_BY_SORT | T_SUPPRESS_ERR_HANDLING))
+ == T_CREATE_UNIQUE_BY_SORT)
+ param->testflag|= T_SUPPRESS_ERR_HANDLING;
_ma_check_print_warning(param,
"Duplicate key %2u for record at %10s against "
"record at %10s",
@@ -6410,7 +6418,7 @@ static my_bool maria_too_big_key_for_sort(MARIA_KEYDEF *key, ha_rows rows)
}
/*
- Deactivate all not unique index that can be recreated fast
+ Deactivate all indexes that can be recreated fast.
These include packed keys on which sorting will use more temporary
space than the max allowed file length or for which the unpacked keys
will take much more space than packed keys.
@@ -6418,7 +6426,8 @@ static my_bool maria_too_big_key_for_sort(MARIA_KEYDEF *key, ha_rows rows)
rows we will put into the file.
*/
-void maria_disable_non_unique_index(MARIA_HA *info, ha_rows rows)
+void maria_disable_indexes_for_rebuild(MARIA_HA *info, ha_rows rows,
+ my_bool all_keys)
{
MARIA_SHARE *share= info->s;
MARIA_KEYDEF *key=share->keyinfo;
@@ -6428,12 +6437,13 @@ void maria_disable_non_unique_index(MARIA_HA *info, ha_rows rows)
(!rows || rows >= MARIA_MIN_ROWS_TO_DISABLE_INDEXES));
for (i=0 ; i < share->base.keys ; i++,key++)
{
- if (!(key->flag &
- (HA_NOSAME | HA_SPATIAL | HA_AUTO_KEY | HA_RTREE_INDEX)) &&
- ! maria_too_big_key_for_sort(key,rows) && share->base.auto_key != i+1)
+ if (!(key->flag & (HA_SPATIAL | HA_AUTO_KEY | HA_RTREE_INDEX)) &&
+ ! maria_too_big_key_for_sort(key,rows) && share->base.auto_key != i+1 &&
+ (all_keys || !(key->flag & HA_NOSAME)))
{
maria_clear_key_active(share->state.key_map, i);
info->update|= HA_STATE_CHANGED;
+ info->create_unique_index_by_sort= all_keys;
}
}
}
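The ma_check.c hunks change what happens when create-index-by-sort hits a duplicate
on a unique key: sort_key_write() arms T_SUPPRESS_ERR_HANDLING after the first
duplicate so only one warning is printed, and the failure is recorded in
share->state.dupp_key instead of forcing a slow retry. Condensed flow (names from
the hunks, control flow compressed):

    if (create_index_by_sort_failed)
    {
      if ((param->testflag & T_CREATE_UNIQUE_BY_SORT) &&
          sort_param.sort_info->dupp)
        share->state.dupp_key= sort_param.key;  /* reported later as
                                                   ER_DUP_ENTRY_WITH_KEY_NAME */
      else
        param->retry_repair= 1;                 /* fall back to slow repair */
    }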
diff --git a/storage/maria/ma_ft_boolean_search.c b/storage/maria/ma_ft_boolean_search.c
index 83ae08553ef..c98c4b599fc 100644
--- a/storage/maria/ma_ft_boolean_search.c
+++ b/storage/maria/ma_ft_boolean_search.c
@@ -527,7 +527,7 @@ static void _ftb_init_index_search(FT_INFO *ftb)
/* 4 */
if (!is_tree_inited(& ftb->no_dupes))
init_tree(& ftb->no_dupes,0,0,sizeof(my_off_t),
- _ftb_no_dupes_cmp,0,0,0);
+ _ftb_no_dupes_cmp,0,0,0);
else
reset_tree(& ftb->no_dupes);
}
@@ -561,7 +561,7 @@ FT_INFO * maria_ft_init_boolean_search(MARIA_HA *info, uint keynr,
bzero(& ftb->no_dupes, sizeof(TREE));
ftb->last_word= 0;
- init_alloc_root(&ftb->mem_root, 1024, 1024);
+ init_alloc_root(&ftb->mem_root, 1024, 1024, 0);
ftb->queue.max_elements= 0;
if (!(ftbe=(FTB_EXPR *)alloc_root(&ftb->mem_root, sizeof(FTB_EXPR))))
goto err;
diff --git a/storage/maria/ma_ft_nlq_search.c b/storage/maria/ma_ft_nlq_search.c
index c8b87c4f42c..c6d9c2411c6 100644
--- a/storage/maria/ma_ft_nlq_search.c
+++ b/storage/maria/ma_ft_nlq_search.c
@@ -239,8 +239,8 @@ FT_INFO *maria_ft_init_nlq_search(MARIA_HA *info, uint keynr, uchar *query,
bzero(&wtree,sizeof(wtree));
- init_tree(&aio.dtree,0,0,sizeof(FT_SUPERDOC),(qsort_cmp2)&FT_SUPERDOC_cmp,0,
- NULL, NULL);
+ init_tree(&aio.dtree,0,0,sizeof(FT_SUPERDOC),(qsort_cmp2)&FT_SUPERDOC_cmp,
+ NULL, NULL, MYF(0));
maria_ft_parse_init(&wtree, aio.charset);
ftparser_param->flags= 0;
diff --git a/storage/maria/ma_ft_parser.c b/storage/maria/ma_ft_parser.c
index 3dd6fab5214..81b5ea5119c 100644
--- a/storage/maria/ma_ft_parser.c
+++ b/storage/maria/ma_ft_parser.c
@@ -254,8 +254,8 @@ void maria_ft_parse_init(TREE *wtree, CHARSET_INFO *cs)
{
DBUG_ENTER("maria_ft_parse_init");
if (!is_tree_inited(wtree))
- init_tree(wtree,0,0,sizeof(FT_WORD),(qsort_cmp2)&FT_WORD_cmp,0, NULL,
- (void*) cs);
+ init_tree(wtree,0,0,sizeof(FT_WORD),(qsort_cmp2)&FT_WORD_cmp, NULL,
+ (void*) cs, MYF(0));
DBUG_VOID_RETURN;
}
@@ -348,7 +348,7 @@ MYSQL_FTPARSER_PARAM* maria_ftparser_alloc_param(MARIA_HA *info)
info->ftparser_param= (MYSQL_FTPARSER_PARAM *)
my_malloc(MAX_PARAM_NR * sizeof(MYSQL_FTPARSER_PARAM) *
info->s->ftkeys, MYF(MY_WME | MY_ZEROFILL));
- init_alloc_root(&info->ft_memroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0);
+ init_alloc_root(&info->ft_memroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0, MYF(0));
}
return info->ftparser_param;
}
diff --git a/storage/maria/ma_loghandler.c b/storage/maria/ma_loghandler.c
index 341bbc6088f..56926c048d8 100644
--- a/storage/maria/ma_loghandler.c
+++ b/storage/maria/ma_loghandler.c
@@ -3629,10 +3629,10 @@ my_bool translog_init_with_table(const char *directory,
mysql_rwlock_init(key_TRANSLOG_DESCRIPTOR_open_files_lock,
&log_descriptor.open_files_lock) ||
my_init_dynamic_array(&log_descriptor.open_files,
- sizeof(TRANSLOG_FILE*), 10, 10) ||
+ sizeof(TRANSLOG_FILE*), 10, 10, MYF(0)) ||
my_init_dynamic_array(&log_descriptor.unfinished_files,
sizeof(struct st_file_counter),
- 10, 10))
+ 10, 10, MYF(0)))
goto err;
log_descriptor.min_need_file= 0;
log_descriptor.min_file_number= 0;
@@ -5528,7 +5528,7 @@ translog_write_variable_record_mgroup(LSN *lsn,
if (my_init_dynamic_array(&groups,
sizeof(struct st_translog_group_descriptor),
- 10, 10))
+ 10, 10, MYF(0)))
{
translog_unlock();
DBUG_PRINT("error", ("init array failed"));
diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c
index 5f90f61c786..88422e3dc5f 100644
--- a/storage/maria/ma_open.c
+++ b/storage/maria/ma_open.c
@@ -160,7 +160,7 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share, const char *name,
if (my_init_dynamic_array(&info.pinned_pages,
sizeof(MARIA_PINNED_PAGE),
max(share->base.blobs*2 + 4,
- MARIA_MAX_TREE_LEVELS*3), 16))
+ MARIA_MAX_TREE_LEVELS*3), 16, MYF(0)))
goto err;
diff --git a/storage/maria/ma_sort.c b/storage/maria/ma_sort.c
index e1e7b323683..4bc179c3008 100644
--- a/storage/maria/ma_sort.c
+++ b/storage/maria/ma_sort.c
@@ -164,7 +164,7 @@ int _ma_create_index_by_sort(MARIA_SORT_PARAM *info, my_bool no_messages,
HA_FT_MAXBYTELEN, MYF(0))))
{
if (my_init_dynamic_array(&buffpek, sizeof(BUFFPEK), maxbuffer,
- maxbuffer/2))
+ maxbuffer/2, MYF(0)))
{
my_free(sort_keys);
sort_keys= 0;
@@ -397,7 +397,7 @@ pthread_handler_t _ma_thr_find_all_keys(void *arg)
HA_FT_MAXBYTELEN : 0), MYF(0))))
{
if (my_init_dynamic_array(&sort_param->buffpek, sizeof(BUFFPEK),
- maxbuffer, maxbuffer/2))
+ maxbuffer, maxbuffer/2, MYF(0)))
{
my_free(sort_keys);
sort_keys= (uchar **) NULL; /* for err: label */
diff --git a/storage/maria/ma_write.c b/storage/maria/ma_write.c
index f1649083105..752741d0a5e 100644
--- a/storage/maria/ma_write.c
+++ b/storage/maria/ma_write.c
@@ -881,7 +881,7 @@ ChangeSet@1.2562, 2008-04-09 07:41:40+02:00, serg@janus.mylan +9 -0
/* Yup. converting */
info->ft1_to_ft2=(DYNAMIC_ARRAY *)
my_malloc(sizeof(DYNAMIC_ARRAY), MYF(MY_WME));
- my_init_dynamic_array(info->ft1_to_ft2, ft2len, 300, 50);
+ my_init_dynamic_array(info->ft1_to_ft2, ft2len, 300, 50, MYF(0));
/*
Now, adding all keys from the page to dynarray
@@ -1766,8 +1766,8 @@ int maria_init_bulk_insert(MARIA_HA *info, ulong cache_size, ha_rows rows)
init_tree(&info->bulk_insert[i],
cache_size * key[i].maxlength,
cache_size * key[i].maxlength, 0,
- (qsort_cmp2)keys_compare, 0,
- (tree_element_free) keys_free, (void *)params++);
+ (qsort_cmp2)keys_compare,
+ (tree_element_free) keys_free, (void *)params++, MYF(0));
}
else
info->bulk_insert[i].root=0;
diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h
index 1eb720c9607..a5c448cf0f7 100644
--- a/storage/maria/maria_def.h
+++ b/storage/maria/maria_def.h
@@ -196,6 +196,9 @@ typedef struct st_maria_state_info
/* LSN when we wrote file id to the log */
LSN logrec_file_id;
+ uint8 dupp_key; /* Last processed index with a */
+ /* violated uniqueness constraint */
+
/* the following isn't saved on disk */
uint state_diff_length; /* Should be 0 */
uint state_length; /* Length of state header in file */
@@ -667,6 +670,7 @@ struct st_maria_handler
uchar length_buff[5]; /* temp buff to store blob lengths */
int maria_rtree_recursion_depth;
+ my_bool create_unique_index_by_sort;
index_cond_func_t index_cond_func; /* Index condition function */
void *index_cond_func_arg; /* parameter for the func */
};
diff --git a/storage/maria/maria_pack.c b/storage/maria/maria_pack.c
index 481b77a2cc6..40686995378 100644
--- a/storage/maria/maria_pack.c
+++ b/storage/maria/maria_pack.c
@@ -798,8 +798,8 @@ static HUFF_COUNTS *init_huff_count(MARIA_HA *info,my_off_t records)
'tree_pos'. Its keys are implemented by pointers into 'tree_buff'.
This is accomplished by '-1' as the element size.
*/
- init_tree(&count[i].int_tree,0,0,-1,(qsort_cmp2) compare_tree,0, NULL,
- NULL);
+ init_tree(&count[i].int_tree,0,0,-1,(qsort_cmp2) compare_tree, NULL,
+ NULL, MYF(0));
if (records && type != FIELD_BLOB && type != FIELD_VARCHAR)
count[i].tree_pos=count[i].tree_buff =
my_malloc(count[i].field_length > 1 ? tree_buff_length : 2,
diff --git a/storage/maria/unittest/sequence_storage.c b/storage/maria/unittest/sequence_storage.c
index d5db20d31ca..4563ea76c64 100644
--- a/storage/maria/unittest/sequence_storage.c
+++ b/storage/maria/unittest/sequence_storage.c
@@ -33,7 +33,7 @@ my_bool seq_storage_reader_init(SEQ_STORAGE *seq, const char *file)
seq->pos= 0;
if ((fd= my_fopen(file, O_RDONLY, MYF(MY_WME))) == NULL)
return 1;
- if (my_init_dynamic_array(&seq->seq, sizeof(ulong), 10, 10))
+ if (my_init_dynamic_array(&seq->seq, sizeof(ulong), 10, 10, MYF(0)))
return 1;
for(;;)
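The long tail of call-site changes in this commit all serve one mysys API change:
my_init_dynamic_array(), init_alloc_root() and init_tree() now take a trailing myf
flags word. MYF(0) keeps the old behaviour, while the repair paths pass
MYF(param->malloc_flags) so allocations can be attributed to the session
(MY_THREAD_SPECIFIC is assumed to be the flag carried there). Minimal call sketch:

    DYNAMIC_ARRAY da;
    MEM_ROOT root;

    if (my_init_dynamic_array(&da, sizeof(ulong), 16, 16, MYF(0)))
      return 1;                                   /* out of memory */
    init_alloc_root(&root, 1024, 0, MYF(MY_THREAD_SPECIFIC));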
diff --git a/storage/myisam/ft_boolean_search.c b/storage/myisam/ft_boolean_search.c
index 8b61e1dc4f2..70a7037a446 100644
--- a/storage/myisam/ft_boolean_search.c
+++ b/storage/myisam/ft_boolean_search.c
@@ -550,7 +550,7 @@ static void _ftb_init_index_search(FT_INFO *ftb)
/* 4 */
if (!is_tree_inited(& ftb->no_dupes))
init_tree(& ftb->no_dupes,0,0,sizeof(my_off_t),
- _ftb_no_dupes_cmp,0,0,0);
+ _ftb_no_dupes_cmp,0,0,MYF(0));
else
reset_tree(& ftb->no_dupes);
}
@@ -583,7 +583,7 @@ FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, uchar *query,
bzero(& ftb->no_dupes, sizeof(TREE));
ftb->last_word= 0;
- init_alloc_root(&ftb->mem_root, 1024, 1024);
+ init_alloc_root(&ftb->mem_root, 1024, 1024, MYF(0));
ftb->queue.max_elements= 0;
if (!(ftbe=(FTB_EXPR *)alloc_root(&ftb->mem_root, sizeof(FTB_EXPR))))
goto err;
diff --git a/storage/myisam/ft_nlq_search.c b/storage/myisam/ft_nlq_search.c
index bafa7064e28..9524b6f1833 100644
--- a/storage/myisam/ft_nlq_search.c
+++ b/storage/myisam/ft_nlq_search.c
@@ -248,8 +248,8 @@ FT_INFO *ft_init_nlq_search(MI_INFO *info, uint keynr, uchar *query,
bzero(&wtree,sizeof(wtree));
- init_tree(&aio.dtree,0,0,sizeof(FT_SUPERDOC),(qsort_cmp2)&FT_SUPERDOC_cmp,0,
- NULL, NULL);
+ init_tree(&aio.dtree,0,0,sizeof(FT_SUPERDOC),(qsort_cmp2)&FT_SUPERDOC_cmp,
+ NULL, NULL, MYF(0));
ft_parse_init(&wtree, aio.charset);
ftparser_param->flags= 0;
diff --git a/storage/myisam/ft_parser.c b/storage/myisam/ft_parser.c
index c534a9a060b..0e89d7d1b3a 100644
--- a/storage/myisam/ft_parser.c
+++ b/storage/myisam/ft_parser.c
@@ -249,7 +249,8 @@ void ft_parse_init(TREE *wtree, CHARSET_INFO *cs)
{
DBUG_ENTER("ft_parse_init");
if (!is_tree_inited(wtree))
- init_tree(wtree,0,0,sizeof(FT_WORD),(qsort_cmp2)&FT_WORD_cmp,0,0,(void*)cs);
+ init_tree(wtree, 0, 0, sizeof(FT_WORD), (qsort_cmp2)&FT_WORD_cmp, 0,
+ (void*)cs, MYF(0));
DBUG_VOID_RETURN;
}
@@ -341,7 +342,7 @@ MYSQL_FTPARSER_PARAM* ftparser_alloc_param(MI_INFO *info)
info->ftparser_param= (MYSQL_FTPARSER_PARAM *)
my_malloc(MAX_PARAM_NR * sizeof(MYSQL_FTPARSER_PARAM) *
info->s->ftkeys, MYF(MY_WME | MY_ZEROFILL));
- init_alloc_root(&info->ft_memroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0);
+ init_alloc_root(&info->ft_memroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0, MYF(0));
}
return info->ftparser_param;
}
diff --git a/storage/myisam/ft_stopwords.c b/storage/myisam/ft_stopwords.c
index 8f6f9308d3c..45441c17bbb 100644
--- a/storage/myisam/ft_stopwords.c
+++ b/storage/myisam/ft_stopwords.c
@@ -61,9 +61,8 @@ int ft_init_stopwords()
if (!(stopwords3=(TREE *)my_malloc(sizeof(TREE),MYF(0))))
DBUG_RETURN(-1);
init_tree(stopwords3,0,0,sizeof(FT_STOPWORD),(qsort_cmp2)&FT_STOPWORD_cmp,
- 0,
(ft_stopword_file ? (tree_element_free)&FT_STOPWORD_free : 0),
- NULL);
+ NULL, MYF(0));
/*
Stopword engine currently does not support tricky
character sets UCS2, UTF16, UTF32.
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index df534242f87..2924903974f 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -139,6 +139,9 @@ static void mi_check_print_msg(HA_CHECK *param, const char* msg_type,
char msgbuf[MYSQL_ERRMSG_SIZE];
char name[NAME_LEN*2+2];
+ if (param->testflag & T_SUPPRESS_ERR_HANDLING)
+ return;
+
msg_length= my_vsnprintf(msgbuf, sizeof(msgbuf), fmt, args);
msgbuf[sizeof(msgbuf) - 1] = 0; // healthy paranoia
@@ -571,6 +574,8 @@ void mi_check_print_error(HA_CHECK *param, const char *fmt,...)
{
param->error_printed|=1;
param->out_flag|= O_DATA_LOST;
+ if (param->testflag & T_SUPPRESS_ERR_HANDLING)
+ return;
va_list args;
va_start(args, fmt);
mi_check_print_msg(param, "error", fmt, args);
@@ -833,10 +838,6 @@ int ha_myisam::close(void)
int ha_myisam::write_row(uchar *buf)
{
- /* If we have a timestamp column, update it to the current time */
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
-
/*
If we have an auto_increment column and we are writing a changed row
or a new row, then update the auto_increment value in the record.
@@ -1075,6 +1076,7 @@ int ha_myisam::repair(THD *thd, HA_CHECK &param, bool do_optimize)
param.thd= thd;
param.tmpdir= &mysql_tmpdir_list;
param.out_flag= 0;
+ share->state.dupp_key= MAX_KEY;
strmov(fixed_name,file->filename);
// Release latches since this can take a long time
@@ -1132,6 +1134,10 @@ int ha_myisam::repair(THD *thd, HA_CHECK &param, bool do_optimize)
error = mi_repair_by_sort(&param, file, fixed_name,
test(param.testflag & T_QUICK));
}
+ if (error && file->create_unique_index_by_sort &&
+ share->state.dupp_key != MAX_KEY)
+ print_keydup_error(share->state.dupp_key,
+ ER(ER_DUP_ENTRY_WITH_KEY_NAME), MYF(0));
}
else
{
@@ -1439,6 +1445,8 @@ int ha_myisam::enable_indexes(uint mode)
param.op_name= "recreating_index";
param.testflag= (T_SILENT | T_REP_BY_SORT | T_QUICK |
T_CREATE_MISSING_KEYS);
+ if (file->create_unique_index_by_sort)
+ param.testflag|= T_CREATE_UNIQUE_BY_SORT;
param.myf_rw&= ~MY_WAIT_IF_FULL;
param.sort_buffer_length= THDVAR(thd, sort_buffer_size);
param.stats_method= (enum_handler_stats_method)THDVAR(thd, stats_method);
@@ -1506,15 +1514,16 @@ int ha_myisam::indexes_are_disabled(void)
activate special bulk-insert optimizations
SYNOPSIS
- start_bulk_insert(rows)
+ start_bulk_insert(rows, flags)
rows Rows to be inserted
0 if we don't know
+ flags Flags to control index creation
NOTICE
Do not forget to call end_bulk_insert() later!
*/
-void ha_myisam::start_bulk_insert(ha_rows rows)
+void ha_myisam::start_bulk_insert(ha_rows rows, uint flags)
{
DBUG_ENTER("ha_myisam::start_bulk_insert");
THD *thd= current_thd;
@@ -1548,7 +1557,10 @@ void ha_myisam::start_bulk_insert(ha_rows rows)
mi_clear_all_keys_active(file->s->state.key_map);
}
else
- mi_disable_non_unique_index(file,rows);
+ {
+ my_bool all_keys= test(flags & HA_CREATE_UNIQUE_INDEX_BY_SORT);
+ mi_disable_indexes_for_rebuild(file, rows, all_keys);
+ }
}
else
if (!file->bulk_insert &&
@@ -1657,8 +1669,6 @@ bool ha_myisam::is_crashed() const
int ha_myisam::update_row(const uchar *old_data, uchar *new_data)
{
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- table->timestamp_field->set_time();
return mi_update(file,old_data,new_data);
}
diff --git a/storage/myisam/ha_myisam.h b/storage/myisam/ha_myisam.h
index 79324f64370..3c0d10fae8c 100644
--- a/storage/myisam/ha_myisam.h
+++ b/storage/myisam/ha_myisam.h
@@ -56,7 +56,6 @@ class ha_myisam: public handler
ha_myisam(handlerton *hton, TABLE_SHARE *table_arg);
~ha_myisam() {}
handler *clone(const char *name, MEM_ROOT *mem_root);
- const char *table_type() const { return "MyISAM"; }
const char *index_type(uint key_number);
const char **bas_ext() const;
ulonglong table_flags() const { return int_table_flags; }
@@ -116,7 +115,7 @@ class ha_myisam: public handler
int disable_indexes(uint mode);
int enable_indexes(uint mode);
int indexes_are_disabled(void);
- void start_bulk_insert(ha_rows rows);
+ void start_bulk_insert(ha_rows rows, uint flags);
int end_bulk_insert();
ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
void update_create_info(HA_CREATE_INFO *create_info);
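On the caller side, the server layer decides whether duplicates may be treated as
hard errors before passing the flag down. A hypothetical caller sketch (the
ignore_duplicates condition is an assumption, not taken from this commit):

    uint flags= 0;
    if (!ignore_duplicates)                 /* no INSERT IGNORE / REPLACE */
      flags|= HA_CREATE_UNIQUE_INDEX_BY_SORT;
    table->file->ha_start_bulk_insert(rows, flags);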
diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c
index 7e97a751376..056aff5a72b 100644
--- a/storage/myisam/mi_check.c
+++ b/storage/myisam/mi_check.c
@@ -2299,7 +2299,8 @@ int mi_repair_by_sort(HA_CHECK *param, register MI_INFO *info,
mysql_file_seek(param->read_cache.file, 0L, MY_SEEK_END, MYF(0));
sort_param.wordlist=NULL;
- init_alloc_root(&sort_param.wordroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0);
+ init_alloc_root(&sort_param.wordroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0,
+ MYF(param->malloc_flags));
if (share->data_file_type == DYNAMIC_RECORD)
length=max(share->base.min_pack_length+1,share->base.min_block_length);
@@ -2408,7 +2409,10 @@ int mi_repair_by_sort(HA_CHECK *param, register MI_INFO *info,
(my_bool) (!(param->testflag & T_VERBOSE)),
param->sort_buffer_length))
{
- param->retry_repair= 1;
+ if ((param->testflag & T_CREATE_UNIQUE_BY_SORT) && sort_param.sort_info->dupp)
+ share->state.dupp_key= sort_param.key;
+ else
+ param->retry_repair= 1;
if (! param->error_printed)
mi_check_print_error(param, "Couldn't fix table with create_index_by_sort(). Error: %d",
my_errno);
@@ -2866,7 +2870,8 @@ int mi_repair_parallel(HA_CHECK *param, register MI_INFO *info,
uint ft_max_word_len_for_sort=FT_MAX_WORD_LEN_FOR_SORT*
sort_param[i].keyinfo->seg->charset->mbmaxlen;
sort_param[i].key_length+=ft_max_word_len_for_sort-HA_FT_MAXBYTELEN;
- init_alloc_root(&sort_param[i].wordroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0);
+ init_alloc_root(&sort_param[i].wordroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0,
+ MYF(param->malloc_flags));
}
}
sort_info.total_keys=i;
@@ -3829,6 +3834,9 @@ static int sort_key_write(MI_SORT_PARAM *sort_param, const void *a)
sort_info->info->lastpos=get_record_for_key(sort_info->info,
sort_param->keyinfo,
(uchar*) a);
+ if ((param->testflag & (T_CREATE_UNIQUE_BY_SORT | T_SUPPRESS_ERR_HANDLING))
+ == T_CREATE_UNIQUE_BY_SORT)
+ param->testflag|= T_SUPPRESS_ERR_HANDLING;
mi_check_print_warning(param,
"Duplicate key for record at %10s against record at %10s",
llstr(sort_info->info->lastpos,llbuff),
@@ -4664,7 +4672,7 @@ static my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows)
}
/*
- Deactivate all not unique index that can be recreated fast
+ Deactivate all indexes that can be recreated fast.
These include packed keys on which sorting will use more temporary
space than the max allowed file length or for which the unpacked keys
will take much more space than packed keys.
@@ -4672,7 +4680,8 @@ static my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows)
rows we will put into the file.
*/
-void mi_disable_non_unique_index(MI_INFO *info, ha_rows rows)
+void mi_disable_indexes_for_rebuild(MI_INFO *info, ha_rows rows,
+ my_bool all_keys)
{
MYISAM_SHARE *share=info->s;
MI_KEYDEF *key=share->keyinfo;
@@ -4682,11 +4691,13 @@ void mi_disable_non_unique_index(MI_INFO *info, ha_rows rows)
(!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES));
for (i=0 ; i < share->base.keys ; i++,key++)
{
- if (!(key->flag & (HA_NOSAME | HA_SPATIAL | HA_AUTO_KEY)) &&
- ! mi_too_big_key_for_sort(key,rows) && info->s->base.auto_key != i+1)
+ if (!(key->flag & (HA_SPATIAL | HA_AUTO_KEY)) &&
+ ! mi_too_big_key_for_sort(key,rows) && info->s->base.auto_key != i+1 &&
+ (all_keys || !(key->flag & HA_NOSAME)))
{
mi_clear_key_active(share->state.key_map, i);
info->update|= HA_STATE_CHANGED;
+ info->create_unique_index_by_sort= all_keys;
}
}
}
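The rewritten loop in mi_disable_indexes_for_rebuild() reads more easily as a
predicate; this restates the condition from the hunk above (illustrative helper,
not part of the patch):

    static my_bool key_rebuildable_by_sort(MI_KEYDEF *key, ha_rows rows,
                                           uint keynr, MYISAM_SHARE *share,
                                           my_bool all_keys)
    {
      return !(key->flag & (HA_SPATIAL | HA_AUTO_KEY)) &&
             !mi_too_big_key_for_sort(key, rows) &&
             share->base.auto_key != keynr + 1 &&
             (all_keys || !(key->flag & HA_NOSAME));
    }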
diff --git a/storage/myisam/mi_write.c b/storage/myisam/mi_write.c
index 70cc96d0cba..60ea7757827 100644
--- a/storage/myisam/mi_write.c
+++ b/storage/myisam/mi_write.c
@@ -544,7 +544,7 @@ int _mi_insert(register MI_INFO *info, register MI_KEYDEF *keyinfo,
/* yup. converting */
info->ft1_to_ft2=(DYNAMIC_ARRAY *)
my_malloc(sizeof(DYNAMIC_ARRAY), MYF(MY_WME));
- my_init_dynamic_array(info->ft1_to_ft2, ft2len, 300, 50);
+ my_init_dynamic_array(info->ft1_to_ft2, ft2len, 300, 50, MYF(0));
/*
now, adding all keys from the page to dynarray
@@ -1013,8 +1013,8 @@ int mi_init_bulk_insert(MI_INFO *info, ulong cache_size, ha_rows rows)
init_tree(&info->bulk_insert[i],
cache_size * key[i].maxlength,
cache_size * key[i].maxlength, 0,
- (qsort_cmp2)keys_compare, 0,
- (tree_element_free) keys_free, (void *)params++);
+ (qsort_cmp2)keys_compare,
+ (tree_element_free) keys_free, (void *)params++, MYF(0));
}
else
info->bulk_insert[i].root=0;
diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h
index e3c1647cd59..a0377e34130 100644
--- a/storage/myisam/myisamdef.h
+++ b/storage/myisam/myisamdef.h
@@ -82,6 +82,9 @@ typedef struct st_mi_state_info
uint open_count;
uint8 changed; /* Changed since myisamchk */
+ uint8 dupp_key; /* Last processed index with a */
+ /* violated uniqueness constraint */
+
/* the following isn't saved on disk */
uint state_diff_length; /* Should be 0 */
uint state_length; /* Length of state header in file */
@@ -297,6 +300,7 @@ struct st_myisam_info
my_bool page_changed;
/* If info->buff has to be reread for rnext */
my_bool buff_used;
+ my_bool create_unique_index_by_sort;
index_cond_func_t index_cond_func; /* Index condition function */
void *index_cond_func_arg; /* parameter for the func */
THR_LOCK_DATA lock;
@@ -705,8 +709,8 @@ void mi_restore_status(void *param);
void mi_copy_status(void *to, void *from);
my_bool mi_check_status(void *param);
void mi_fix_status(MI_INFO *org_table, MI_INFO *new_table);
-void mi_disable_non_unique_index(MI_INFO *info, ha_rows rows);
-
+void mi_disable_indexes_for_rebuild(MI_INFO *info, ha_rows rows,
+ my_bool all_keys);
extern MI_INFO *test_if_reopen(char *filename);
my_bool check_table_is_closed(const char *name, const char *where);
int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share, const char *orn_name,
diff --git a/storage/myisam/myisamlog.c b/storage/myisam/myisamlog.c
index 3314f1b94ac..1624213851b 100644
--- a/storage/myisam/myisamlog.c
+++ b/storage/myisam/myisamlog.c
@@ -329,8 +329,9 @@ static int examine_log(char * file_name, char **table_names)
init_io_cache(&cache,file,0,READ_CACHE,start_offset,0,MYF(0));
bzero((uchar*) com_count,sizeof(com_count));
- init_tree(&tree,0,0,sizeof(file_info),(qsort_cmp2) file_info_compare,1,
- (tree_element_free) file_info_free, NULL);
+ init_tree(&tree,0,0,sizeof(file_info),(qsort_cmp2) file_info_compare,
+ (tree_element_free) file_info_free, NULL,
+ MYF(MY_TREE_WITH_DELETE));
(void) init_key_cache(dflt_key_cache,KEY_CACHE_BLOCK_SIZE,KEY_CACHE_SIZE,
0, 0, 0);
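init_tree() also lost its positional with_delete argument; that behaviour now
travels as MY_TREE_WITH_DELETE in the trailing myf word, as this myisamlog.c hunk
shows. Converted call shape (sketch):

    /* old: init_tree(&t, 0, 0, size, cmp, with_delete, free_f, arg) */
    init_tree(&tree, 0, 0, sizeof(file_info), (qsort_cmp2) file_info_compare,
              (tree_element_free) file_info_free, NULL,
              MYF(MY_TREE_WITH_DELETE));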
diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c
index 5d6b03ff4b8..6ce88db87f5 100644
--- a/storage/myisam/myisampack.c
+++ b/storage/myisam/myisampack.c
@@ -816,8 +816,8 @@ static HUFF_COUNTS *init_huff_count(MI_INFO *info,my_off_t records)
'tree_pos'. Its keys are implemented by pointers into 'tree_buff'.
This is accomplished by '-1' as the element size.
*/
- init_tree(&count[i].int_tree,0,0,-1,(qsort_cmp2) compare_tree,0, NULL,
- NULL);
+ init_tree(&count[i].int_tree,0,0,-1,(qsort_cmp2) compare_tree, NULL,
+ NULL, MYF(0));
if (records && type != FIELD_BLOB && type != FIELD_VARCHAR)
count[i].tree_pos=count[i].tree_buff =
my_malloc(count[i].field_length > 1 ? tree_buff_length : 2,
diff --git a/storage/myisam/sort.c b/storage/myisam/sort.c
index 2c4639b3143..4af45ea02e9 100644
--- a/storage/myisam/sort.c
+++ b/storage/myisam/sort.c
@@ -165,7 +165,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
HA_FT_MAXBYTELEN, MYF(0))))
{
if (my_init_dynamic_array(&buffpek, sizeof(BUFFPEK), maxbuffer,
- maxbuffer/2))
+ maxbuffer/2, MYF(0)))
{
my_free(sort_keys);
sort_keys= 0;
@@ -389,7 +389,7 @@ pthread_handler_t thr_find_all_keys(void *arg)
HA_FT_MAXBYTELEN : 0), MYF(0))))
{
if (my_init_dynamic_array(&sort_param->buffpek, sizeof(BUFFPEK),
- maxbuffer, maxbuffer/2))
+ maxbuffer, maxbuffer/2, MYF(0)))
{
my_free(sort_keys);
sort_keys= (uchar **) NULL; /* for err: label */
diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc
index 47a3abb78d2..1883a3955cc 100644
--- a/storage/myisammrg/ha_myisammrg.cc
+++ b/storage/myisammrg/ha_myisammrg.cc
@@ -120,7 +120,7 @@ ha_myisammrg::ha_myisammrg(handlerton *hton, TABLE_SHARE *table_arg)
:handler(hton, table_arg), file(0), is_cloned(0)
{
init_sql_alloc(&children_mem_root,
- FN_REFLEN + ALLOC_ROOT_MIN_BLOCK_SIZE, 0);
+ FN_REFLEN + ALLOC_ROOT_MIN_BLOCK_SIZE, 0, MYF(0));
}
@@ -1078,8 +1078,6 @@ int ha_myisammrg::write_row(uchar * buf)
if (file->merge_insert_method == MERGE_INSERT_DISABLED || !file->tables)
DBUG_RETURN(HA_ERR_TABLE_READONLY);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
if (table->next_number_field && buf == table->record[0])
{
int error;
@@ -1093,8 +1091,6 @@ int ha_myisammrg::update_row(const uchar * old_data, uchar * new_data)
{
DBUG_ASSERT(this->file->children_attached);
ha_statistic_increment(&SSV::ha_update_count);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- table->timestamp_field->set_time();
return myrg_update(file,old_data,new_data);
}
@@ -1721,28 +1717,11 @@ static int myisammrg_init(void *p)
struct st_mysql_storage_engine myisammrg_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
-mysql_declare_plugin(myisammrg)
-{
- MYSQL_STORAGE_ENGINE_PLUGIN,
- &myisammrg_storage_engine,
- "MRG_MYISAM",
- "MySQL AB",
- "Collection of identical MyISAM tables",
- PLUGIN_LICENSE_GPL,
- myisammrg_init, /* Plugin Init */
- NULL, /* Plugin Deinit */
- 0x0100, /* 1.0 */
- NULL, /* status variables */
- NULL, /* system variables */
- NULL, /* config options */
- 0, /* flags */
-}
-mysql_declare_plugin_end;
maria_declare_plugin(myisammrg)
{
MYSQL_STORAGE_ENGINE_PLUGIN,
&myisammrg_storage_engine,
- "MRG_MYISAM",
+ "MRG_MyISAM",
"MySQL AB",
"Collection of identical MyISAM tables",
PLUGIN_LICENSE_GPL,
diff --git a/storage/myisammrg/ha_myisammrg.h b/storage/myisammrg/ha_myisammrg.h
index f5ba2ffef38..8007e7d04e8 100644
--- a/storage/myisammrg/ha_myisammrg.h
+++ b/storage/myisammrg/ha_myisammrg.h
@@ -82,7 +82,6 @@ public:
ha_myisammrg(handlerton *hton, TABLE_SHARE *table_arg);
~ha_myisammrg();
- const char *table_type() const { return "MRG_MyISAM"; }
const char **bas_ext() const;
const char *index_type(uint key_number);
ulonglong table_flags() const
diff --git a/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff b/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff
index 20431a9c713..094b26668c1 100644
--- a/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff
+++ b/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff
@@ -67,7 +67,7 @@
< 4 5
< DROP TABLE t1;
---
-> ERROR HY000: MRG_MYISAM storage engine does not support computed columns
+> ERROR HY000: MRG_MyISAM storage engine does not support computed columns
> # ERROR: Statement ended with errno 1910, errname ER_UNSUPPORTED_ENGINE_FOR_VIRTUAL_COLUMNS (expected to succeed)
> # ------------ UNEXPECTED RESULT ------------
> # [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b INT(11) /*!*/ /*Custom column options*/ GENERATED ALWAYS AS (a+1)) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST ]
diff --git a/storage/oqgraph/ha_oqgraph.h b/storage/oqgraph/ha_oqgraph.h
index 97f8cb54081..ee88e38c526 100644
--- a/storage/oqgraph/ha_oqgraph.h
+++ b/storage/oqgraph/ha_oqgraph.h
@@ -62,10 +62,6 @@ public:
Table_flags table_flags() const;
#endif
~ha_oqgraph() {}
- const char *table_type() const
- {
- return "OQGRAPH";
- }
const char *index_type(uint inx)
{
return "HASH";
diff --git a/storage/perfschema/ha_perfschema.h b/storage/perfschema/ha_perfschema.h
index 17ab601e60f..35670339599 100644
--- a/storage/perfschema/ha_perfschema.h
+++ b/storage/perfschema/ha_perfschema.h
@@ -42,8 +42,6 @@ public:
~ha_perfschema();
- const char *table_type(void) const { return pfs_engine_name; }
-
const char *index_type(uint) { return ""; }
const char **bas_ext(void) const;
diff --git a/storage/perfschema/pfs_check.cc b/storage/perfschema/pfs_check.cc
index c52be6f0da2..2ab54ab1a4a 100644
--- a/storage/perfschema/pfs_check.cc
+++ b/storage/perfschema/pfs_check.cc
@@ -56,6 +56,8 @@ void check_performance_schema()
PFS_engine_table_share::check_all_tables(thd);
delete thd;
+ /* Remember that we don't have a THD */
+ set_current_thd(0);
DBUG_VOID_RETURN;
}
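The pfs_check.cc fix closes a dangling-pointer window: check_performance_schema()
runs with a temporary THD, and current_thd kept pointing at it after the delete.
The pattern, in short:

    THD *thd= new THD;
    /* ... run bootstrap-time table checks with thd ... */
    delete thd;
    set_current_thd(0);         /* current_thd would otherwise dangle */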
diff --git a/storage/perfschema/pfs_instr.cc b/storage/perfschema/pfs_instr.cc
index 8da1a9862e1..c9c0a0b1d80 100644
--- a/storage/perfschema/pfs_instr.cc
+++ b/storage/perfschema/pfs_instr.cc
@@ -821,8 +821,6 @@ const char *sanitize_file_name(const char *unsafe)
*/
void destroy_thread(PFS_thread *pfs)
{
- DBUG_ENTER("destroy_thread");
-
DBUG_ASSERT(pfs != NULL);
if (pfs->m_filename_hash_pins)
{
@@ -835,7 +833,6 @@ void destroy_thread(PFS_thread *pfs)
pfs->m_table_share_hash_pins= NULL;
}
pfs->m_lock.allocated_to_free();
- DBUG_VOID_RETURN;
}
/**
diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc
index 93cb43f3a92..3305875c124 100644
--- a/storage/sphinx/ha_sphinx.cc
+++ b/storage/sphinx/ha_sphinx.cc
@@ -3591,12 +3591,12 @@ struct st_mysql_storage_engine sphinx_storage_engine =
struct st_mysql_show_var sphinx_status_vars[] =
{
- {"Sphinx_total", (char *)sphinx_showfunc_total, SHOW_FUNC},
- {"Sphinx_total_found", (char *)sphinx_showfunc_total_found, SHOW_FUNC},
- {"Sphinx_time", (char *)sphinx_showfunc_time, SHOW_FUNC},
- {"Sphinx_word_count", (char *)sphinx_showfunc_word_count, SHOW_FUNC},
- {"Sphinx_words", (char *)sphinx_showfunc_words, SHOW_FUNC},
- {"Sphinx_error", (char *)sphinx_showfunc_error, SHOW_FUNC},
+ {"Sphinx_total", (char *)sphinx_showfunc_total, SHOW_SIMPLE_FUNC},
+ {"Sphinx_total_found", (char *)sphinx_showfunc_total_found, SHOW_SIMPLE_FUNC},
+ {"Sphinx_time", (char *)sphinx_showfunc_time, SHOW_SIMPLE_FUNC},
+ {"Sphinx_word_count", (char *)sphinx_showfunc_word_count, SHOW_SIMPLE_FUNC},
+ {"Sphinx_words", (char *)sphinx_showfunc_words, SHOW_SIMPLE_FUNC},
+ {"Sphinx_error", (char *)sphinx_showfunc_error, SHOW_SIMPLE_FUNC},
{0, 0, (enum_mysql_show_type)0}
};
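The Sphinx status variables switch from SHOW_FUNC to SHOW_SIMPLE_FUNC, a MariaDB
status-variable type for show-functions that are cheap to evaluate; the declaration
shape is unchanged (the show-function signature below is the plugin-API form, shown
for illustration):

    static int sphinx_showfunc_total(THD *thd, SHOW_VAR *out, char *buff);

    static struct st_mysql_show_var sphinx_status_vars[]=
    {
      {"Sphinx_total", (char*) sphinx_showfunc_total, SHOW_SIMPLE_FUNC},
      {0, 0, (enum_mysql_show_type) 0}
    };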
diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc
index 269bf8436a0..5a09bb36292 100644
--- a/storage/xtradb/handler/ha_innodb.cc
+++ b/storage/xtradb/handler/ha_innodb.cc
@@ -121,6 +121,7 @@ static ulong commit_threads = 0;
static mysql_mutex_t commit_threads_m;
static mysql_cond_t commit_cond;
static mysql_mutex_t commit_cond_m;
+static mysql_mutex_t pending_checkpoint_mutex;
static bool innodb_inited = 0;
@@ -254,11 +255,13 @@ static mysql_pfs_key_t innobase_share_mutex_key;
static mysql_pfs_key_t commit_threads_m_key;
static mysql_pfs_key_t commit_cond_mutex_key;
static mysql_pfs_key_t commit_cond_key;
+static mysql_pfs_key_t pending_checkpoint_mutex_key;
static PSI_mutex_info all_pthread_mutexes[] = {
{&commit_threads_m_key, "commit_threads_m", 0},
{&commit_cond_mutex_key, "commit_cond_mutex", 0},
- {&innobase_share_mutex_key, "innobase_share_mutex", 0}
+ {&innobase_share_mutex_key, "innobase_share_mutex", 0},
+ {&pending_checkpoint_mutex_key, "pending_checkpoint_mutex", 0}
};
static PSI_cond_info all_innodb_conds[] = {
@@ -387,6 +390,7 @@ static int innobase_rollback_to_savepoint(handlerton *hton, THD* thd,
static int innobase_savepoint(handlerton *hton, THD* thd, void *savepoint);
static int innobase_release_savepoint(handlerton *hton, THD* thd,
void *savepoint);
+static void innobase_checkpoint_request(handlerton *hton, void *cookie);
static handler *innobase_create_handler(handlerton *hton,
TABLE_SHARE *table,
MEM_ROOT *mem_root);
@@ -487,10 +491,17 @@ static MYSQL_THDVAR_ULONG(lock_wait_timeout, PLUGIN_VAR_RQCMDARG,
NULL, NULL, 50, 1, 1024 * 1024 * 1024, 0);
static MYSQL_THDVAR_ULONG(flush_log_at_trx_commit, PLUGIN_VAR_OPCMDARG,
- "Set to 0 (write and flush once per second),"
- " 1 (write and flush at each commit)"
- " or 2 (write at commit, flush once per second).",
- NULL, NULL, 1, 0, 2, 0);
+ "Controls the durability/speed trade-off for commits."
+ " Set to 0 (write and flush redo log to disk only once per second),"
+ " 1 (flush to disk at each commit),"
+ " 2 (write to log at commit but flush to disk only once per second)"
+ " or 3 (flush to disk at prepare and at commit, slower and usually redundant)."
+ " 1 and 3 guarantees that after a crash, committed transactions will"
+ " not be lost and will be consistent with the binlog and other transactional"
+ " engines. 2 can get inconsistent and lose transactions if there is a"
+ " power failure or kernel crash but not if mysqld crashes. 0 has no"
+ " guarantees in case of crash. 0 and 2 can be faster than 1 or 3.",
+ NULL, NULL, 1, 0, 3, 0);
static MYSQL_THDVAR_BOOL(fake_changes, PLUGIN_VAR_OPCMDARG,
"In the transaction after enabled, UPDATE, INSERT and DELETE only move the cursor to the records "
@@ -2621,6 +2632,7 @@ innobase_init(
innobase_hton->recover=innobase_xa_recover;
innobase_hton->commit_by_xid=innobase_commit_by_xid;
innobase_hton->rollback_by_xid=innobase_rollback_by_xid;
+ innobase_hton->commit_checkpoint_request=innobase_checkpoint_request;
innobase_hton->checkpoint_state= innobase_checkpoint_state;
innobase_hton->create_cursor_read_view=innobase_create_cursor_view;
innobase_hton->set_cursor_read_view=innobase_set_cursor_view;
@@ -3162,6 +3174,9 @@ innobase_change_buffering_inited_ok:
mysql_mutex_init(commit_cond_mutex_key,
&commit_cond_m, MY_MUTEX_INIT_FAST);
mysql_cond_init(commit_cond_key, &commit_cond, NULL);
+ mysql_mutex_init(pending_checkpoint_mutex_key,
+ &pending_checkpoint_mutex,
+ MY_MUTEX_INIT_FAST);
innodb_inited= 1;
#ifdef MYSQL_DYNAMIC_PLUGIN
if (innobase_hton != p) {
@@ -3209,6 +3224,7 @@ innobase_end(
mysql_mutex_destroy(&commit_threads_m);
mysql_mutex_destroy(&commit_cond_m);
mysql_cond_destroy(&commit_cond);
+ mysql_mutex_destroy(&pending_checkpoint_mutex);
}
DBUG_RETURN(err);
@@ -3286,6 +3302,29 @@ innobase_commit_low(
trx_t* trx) /*!< in: transaction handle */
{
if (trx_is_started(trx)) {
+#ifdef HAVE_REPLICATION
+#ifdef MYSQL_SERVER
+ THD *thd=current_thd;
+
+ if (innobase_overwrite_relay_log_info &&
+ thd && thd_is_replication_slave_thread(thd) &&
+ thd->connection_name.length) {
+ /* Update the replication position info inside InnoDB.
+ In embedded server, does nothing. */
+ const char *log_file_name, *group_relay_log_name;
+ ulonglong log_pos, relay_log_pos;
+ bool res = rpl_get_position_info(&log_file_name, &log_pos,
+ &group_relay_log_name,
+ &relay_log_pos);
+ if (res) {
+ trx->mysql_master_log_file_name = log_file_name;
+ trx->mysql_master_log_pos = (ib_int64_t)log_pos;
+ trx->mysql_relay_log_file_name = group_relay_log_name;
+ trx->mysql_relay_log_pos = (ib_int64_t)relay_log_pos;
+ }
+ }
+#endif /* MYSQL_SERVER */
+#endif /* HAVE_REPLICATION */
/* Save the current replication position for write to trx sys
header for undo purposes, see the comment at corresponding call
@@ -3616,6 +3655,147 @@ innobase_rollback_trx(
DBUG_RETURN(convert_error_code_to_mysql(error, 0, NULL));
}
+
+struct pending_checkpoint {
+ struct pending_checkpoint *next;
+ handlerton *hton;
+ void *cookie;
+ ib_uint64_t lsn;
+};
+static struct pending_checkpoint *pending_checkpoint_list;
+static struct pending_checkpoint *pending_checkpoint_list_end;
+
+/*****************************************************************//**
+Handle a commit checkpoint request from server layer.
+We put the request in a queue, so that we can notify upper layer about
+checkpoint complete when we have flushed the redo log.
+If we have already flushed all relevant redo log, we notify immediately.*/
+static
+void
+innobase_checkpoint_request(
+ handlerton *hton,
+ void *cookie)
+{
+ ib_uint64_t lsn;
+ ib_uint64_t flush_lsn;
+ struct pending_checkpoint * entry;
+
+ /* Do the allocation outside of lock to reduce contention. The normal
+ case is that not everything is flushed, so we will need to enqueue. */
+ entry = static_cast<struct pending_checkpoint *>
+ (my_malloc(sizeof(*entry), MYF(MY_WME)));
+ if (!entry) {
+ sql_print_error("Failed to allocate %u bytes."
+ " Commit checkpoint will be skipped.",
+ static_cast<unsigned>(sizeof(*entry)));
+ return;
+ }
+
+ entry->next = NULL;
+ entry->hton = hton;
+ entry->cookie = cookie;
+
+ mysql_mutex_lock(&pending_checkpoint_mutex);
+ lsn = log_get_lsn();
+ flush_lsn = log_get_flush_lsn();
+ if (lsn > flush_lsn) {
+ /* Put the request in queue.
+ When the log gets flushed past the lsn, we will remove the
+ entry from the queue and notify the upper layer. */
+ entry->lsn = lsn;
+ if (pending_checkpoint_list_end) {
+ pending_checkpoint_list_end->next = entry;
+ /* There is no need to order the entries in the list
+ by lsn. The upper layer can accept notifications in
+ any order, and short delays in notifications do not
+ significantly impact performance. */
+ } else {
+ pending_checkpoint_list = entry;
+ }
+ pending_checkpoint_list_end = entry;
+ entry = NULL;
+ }
+ mysql_mutex_unlock(&pending_checkpoint_mutex);
+
+ if (entry) {
+ /* We are already flushed. Notify the checkpoint immediately. */
+ commit_checkpoint_notify_ha(entry->hton, entry->cookie);
+ my_free(entry);
+ }
+}
+
+/*****************************************************************//**
+Log code calls this whenever log has been written and/or flushed up
+to a new position. We use this to notify upper layer of a new commit
+checkpoint when necessary.*/
+extern "C" UNIV_INTERN
+void
+innobase_mysql_log_notify(
+/*===============*/
+ ib_uint64_t write_lsn, /*!< in: LSN written to log file */
+ ib_uint64_t flush_lsn) /*!< in: LSN flushed to disk */
+{
+ struct pending_checkpoint * pending;
+ struct pending_checkpoint * entry;
+ struct pending_checkpoint * last_ready;
+
+ /* It is safe to do a quick check for NULL first without lock.
+ Even if we should race, we will at most skip one checkpoint and
+ take the next one, which is harmless. */
+ if (!pending_checkpoint_list)
+ return;
+
+ mysql_mutex_lock(&pending_checkpoint_mutex);
+ pending = pending_checkpoint_list;
+ if (!pending)
+ {
+ mysql_mutex_unlock(&pending_checkpoint_mutex);
+ return;
+ }
+
+ last_ready = NULL;
+ for (entry = pending; entry != NULL; entry = entry->next)
+ {
+ /* Notify checkpoints up until the first entry that has not
+ been fully flushed to the redo log. Since we do not maintain
+ the list ordered, in principle there could be more entries
+ later than were also flushed. But there is no harm in
delaying notifications for those a bit. And in practice, the
+ list is unlikely to have more than one element anyway, as we
+ flush the redo log at least once every second. */
+ if (entry->lsn > flush_lsn)
+ break;
+ last_ready = entry;
+ }
+
+ if (last_ready)
+ {
+ /* We found some pending checkpoints that are now flushed to
+ disk. So remove them from the list. */
+ pending_checkpoint_list = entry;
+ if (!entry)
+ pending_checkpoint_list_end = NULL;
+ }
+
+ mysql_mutex_unlock(&pending_checkpoint_mutex);
+
+ if (!last_ready)
+ return;
+
+ /* Now that we have released the lock, notify upper layer about all
+ commit checkpoints that have now completed. */
+ for (;;) {
+ entry = pending;
+ pending = pending->next;
+
+ commit_checkpoint_notify_ha(entry->hton, entry->cookie);
+
+ my_free(entry);
+ if (entry == last_ready)
+ break;
+ }
+}
+
/*****************************************************************//**
Rolls back a transaction to a savepoint.
@return 0 if success, HA_ERR_NO_SAVEPOINT if no savepoint with the
@@ -3868,17 +4048,6 @@ static const char* ha_innobase_exts[] = {
};
/****************************************************************//**
-Returns the table type (storage engine name).
-@return table type */
-UNIV_INTERN
-const char*
-ha_innobase::table_type() const
-/*===========================*/
-{
- return(innobase_hton_name);
-}
-
-/****************************************************************//**
Returns the index type. */
UNIV_INTERN
const char*
@@ -6035,9 +6204,6 @@ ha_innobase::write_row(
DBUG_RETURN(HA_ERR_CRASHED);
}
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
-
sql_command = thd_sql_command(user_thd);
if ((sql_command == SQLCOM_ALTER_TABLE
@@ -6463,9 +6629,6 @@ ha_innobase::update_row(
DBUG_RETURN(HA_ERR_CRASHED);
}
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
- table->timestamp_field->set_time();
-
if (prebuilt->upd_node) {
uvect = prebuilt->upd_node->update;
} else {
@@ -7846,7 +8009,7 @@ get_row_format_name(
if (!srv_file_per_table) { \
push_warning_printf( \
thd, MYSQL_ERROR::WARN_LEVEL_WARN, \
- ER_ILLEGAL_HA_CREATE_OPTION, \
+ HA_WRONG_CREATE_OPTION, \
"InnoDB: ROW_FORMAT=%s requires" \
" innodb_file_per_table.", \
get_row_format_name(row_format)); \
@@ -7858,7 +8021,7 @@ get_row_format_name(
if (srv_file_format < DICT_TF_FORMAT_ZIP) { \
push_warning_printf( \
thd, MYSQL_ERROR::WARN_LEVEL_WARN, \
- ER_ILLEGAL_HA_CREATE_OPTION, \
+ HA_WRONG_CREATE_OPTION, \
"InnoDB: ROW_FORMAT=%s requires" \
" innodb_file_format > Antelope.", \
get_row_format_name(row_format)); \
@@ -7908,7 +8071,7 @@ create_options_are_valid(
if (!srv_file_per_table) {
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_per_table.");
ret = FALSE;
@@ -7916,7 +8079,7 @@ create_options_are_valid(
if (srv_file_format < DICT_TF_FORMAT_ZIP) {
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_format > Antelope.");
ret = FALSE;
@@ -7925,7 +8088,7 @@ create_options_are_valid(
default:
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: invalid KEY_BLOCK_SIZE = %lu."
" Valid values are [1, 2, 4, 8, 16]",
create_info->key_block_size);
@@ -7950,7 +8113,7 @@ create_options_are_valid(
if (kbs_specified) {
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: cannot specify ROW_FORMAT = %s"
" with KEY_BLOCK_SIZE.",
get_row_format_name(row_format));
@@ -7964,7 +8127,7 @@ create_options_are_valid(
case ROW_TYPE_NOT_USED:
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION, \
+ HA_WRONG_CREATE_OPTION, \
"InnoDB: invalid ROW_FORMAT specifier.");
ret = FALSE;
break;
@@ -8067,7 +8230,7 @@ ha_innobase::create(
/* Validate create options if innodb_strict_mode is set. */
if (!create_options_are_valid(thd, form, create_info)) {
- DBUG_RETURN(ER_ILLEGAL_HA_CREATE_OPTION);
+ DBUG_RETURN(HA_WRONG_CREATE_OPTION);
}
if (create_info->key_block_size) {
@@ -8093,7 +8256,7 @@ ha_innobase::create(
if (!srv_file_per_table) {
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_per_table.");
flags = 0;
@@ -8102,7 +8265,7 @@ ha_innobase::create(
if (file_format < DICT_TF_FORMAT_ZIP) {
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_format > Antelope.");
flags = 0;
@@ -8111,7 +8274,7 @@ ha_innobase::create(
if (!flags) {
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: ignoring KEY_BLOCK_SIZE=%lu.",
create_info->key_block_size);
}
@@ -8133,7 +8296,7 @@ ha_innobase::create(
with ALTER TABLE anyway. */
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: ignoring KEY_BLOCK_SIZE=%lu"
" unless ROW_FORMAT=COMPRESSED.",
create_info->key_block_size);
@@ -8164,14 +8327,14 @@ ha_innobase::create(
if (!srv_file_per_table) {
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: ROW_FORMAT=%s requires"
" innodb_file_per_table.",
get_row_format_name(row_format));
} else if (file_format < DICT_TF_FORMAT_ZIP) {
push_warning_printf(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: ROW_FORMAT=%s requires"
" innodb_file_format > Antelope.",
get_row_format_name(row_format));
@@ -8188,7 +8351,7 @@ ha_innobase::create(
case ROW_TYPE_PAGE:
push_warning(
thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA_CREATE_OPTION,
+ HA_WRONG_CREATE_OPTION,
"InnoDB: assuming ROW_FORMAT=COMPACT.");
case ROW_TYPE_DEFAULT:
case ROW_TYPE_COMPACT:
@@ -11127,7 +11290,8 @@ ha_innobase::store_lock(
prebuilt->select_lock_type = LOCK_NONE;
prebuilt->stored_select_lock_type = LOCK_NONE;
- } else if (sql_command == SQLCOM_CHECKSUM) {
+ } else if (sql_command == SQLCOM_CHECKSUM ||
+ sql_command == SQLCOM_ANALYZE) {
/* Use consistent read for checksum table */
prebuilt->select_lock_type = LOCK_NONE;
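The pending-checkpoint machinery added above follows a deliberate locking pattern:
allocate before taking the mutex, enqueue only while the LSN is not yet durable,
and always notify outside the lock. Request-side sketch (condensed from the hunks;
enqueue() stands in for the two-pointer list append):

    entry= my_malloc(sizeof(*entry), MYF(MY_WME));   /* before the mutex */
    entry->hton= hton; entry->cookie= cookie; entry->next= NULL;

    mysql_mutex_lock(&pending_checkpoint_mutex);
    if (log_get_lsn() > log_get_flush_lsn()) {
            entry->lsn= log_get_lsn();
            enqueue(entry);     /* innobase_mysql_log_notify() completes it */
            entry= NULL;
    }
    mysql_mutex_unlock(&pending_checkpoint_mutex);

    if (entry) {                /* already durable: notify at once */
            commit_checkpoint_notify_ha(entry->hton, entry->cookie);
            my_free(entry);
    }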
diff --git a/storage/xtradb/handler/ha_innodb.h b/storage/xtradb/handler/ha_innodb.h
index 1c1a04ed5e1..359d0b95367 100644
--- a/storage/xtradb/handler/ha_innodb.h
+++ b/storage/xtradb/handler/ha_innodb.h
@@ -123,7 +123,6 @@ class ha_innobase: public handler
*/
enum row_type get_row_type() const;
- const char* table_type() const;
const char* index_type(uint key_number);
const char** bas_ext() const;
Table_flags table_flags() const;
diff --git a/storage/xtradb/include/ha_prototypes.h b/storage/xtradb/include/ha_prototypes.h
index 51e331c3b5d..c2209c89029 100644
--- a/storage/xtradb/include/ha_prototypes.h
+++ b/storage/xtradb/include/ha_prototypes.h
@@ -136,6 +136,17 @@ innobase_mysql_print_thd(
uint max_query_len); /*!< in: max query length to print, or 0 to
use the default max length */
+/*****************************************************************//**
+Log code calls this whenever log has been written and/or flushed up
+to a new position. We use this to notify upper layer of a new commit
+checkpoint when necessary.*/
+UNIV_INTERN
+void
+innobase_mysql_log_notify(
+/*===============*/
+ ib_uint64_t write_lsn, /*!< in: LSN written to log file */
+ ib_uint64_t flush_lsn); /*!< in: LSN flushed to disk */
+
/**************************************************************//**
Converts a MySQL type to an InnoDB type. Note that this function returns
the 'mtype' of InnoDB. InnoDB differentiates between MySQL's old <= 4.1
diff --git a/storage/xtradb/include/log0log.h b/storage/xtradb/include/log0log.h
index 9a149bcfa76..4f017e01dfd 100644
--- a/storage/xtradb/include/log0log.h
+++ b/storage/xtradb/include/log0log.h
@@ -154,6 +154,13 @@ UNIV_INLINE
ib_uint64_t
log_get_lsn(void);
/*=============*/
+/************************************************************//**
+Gets the last lsn that is fully flushed to disk.
+@return last flushed lsn */
+UNIV_INLINE
+ib_uint64_t
+log_get_flush_lsn(void);
+/*=============*/
/****************************************************************
Gets the log group capacity. It is OK to read the value without
holding log_sys->mutex because it is constant.
diff --git a/storage/xtradb/include/log0log.ic b/storage/xtradb/include/log0log.ic
index 67db6695cab..b54697637b0 100644
--- a/storage/xtradb/include/log0log.ic
+++ b/storage/xtradb/include/log0log.ic
@@ -411,6 +411,25 @@ log_get_lsn(void)
return(lsn);
}
+/************************************************************//**
+Gets the last lsn that is fully flushed to disk.
+@return last flushed lsn */
+UNIV_INLINE
+ib_uint64_t
+log_get_flush_lsn(void)
+/*=============*/
+{
+ ib_uint64_t lsn;
+
+ mutex_enter(&(log_sys->mutex));
+
+ lsn = log_sys->flushed_to_disk_lsn;
+
+ mutex_exit(&(log_sys->mutex));
+
+ return(lsn);
+}
+
/****************************************************************
Gets the log group capacity. It is OK to read the value without
holding log_sys->mutex because it is constant.
diff --git a/storage/xtradb/include/trx0trx.h b/storage/xtradb/include/trx0trx.h
index eded5c303fa..a03f7aceafa 100644
--- a/storage/xtradb/include/trx0trx.h
+++ b/storage/xtradb/include/trx0trx.h
@@ -494,7 +494,6 @@ struct trx_struct{
this is set to 1 then registered should
also be set to 1. This is used in the
XA code */
- unsigned called_commit_ordered:1;/* 1 if innobase_commit_ordered has run. */
/*------------------------------*/
ulint isolation_level;/* TRX_ISO_REPEATABLE_READ, ... */
ulint check_foreigns; /* normally TRUE, but if the user
diff --git a/storage/xtradb/log/log0log.c b/storage/xtradb/log/log0log.c
index c91d39e6b38..e7c7a165b9c 100644
--- a/storage/xtradb/log/log0log.c
+++ b/storage/xtradb/log/log0log.c
@@ -1466,6 +1466,8 @@ log_write_up_to(
ulint loop_count = 0;
#endif /* UNIV_DEBUG */
ulint unlock;
+ ib_uint64_t write_lsn;
+ ib_uint64_t flush_lsn;
if (recv_no_ibuf_operations) {
/* Recovery is running and no operations on the log files are
@@ -1644,8 +1646,13 @@ loop:
log_flush_do_unlocks(unlock);
+ write_lsn = log_sys->write_lsn;
+ flush_lsn = log_sys->flushed_to_disk_lsn;
+
mutex_exit(&(log_sys->mutex));
+ innobase_mysql_log_notify(write_lsn, flush_lsn);
+
return;
do_waits:
diff --git a/storage/xtradb/trx/trx0trx.c b/storage/xtradb/trx/trx0trx.c
index eea8e861387..99b4276fbee 100644
--- a/storage/xtradb/trx/trx0trx.c
+++ b/storage/xtradb/trx/trx0trx.c
@@ -1099,7 +1099,8 @@ trx_commit_off_kernel(
trx->must_flush_log_later = TRUE;
} else if (flush_log_at_trx_commit == 0) {
/* Do nothing */
- } else if (flush_log_at_trx_commit == 1) {
+ } else if (flush_log_at_trx_commit == 1 ||
+ flush_log_at_trx_commit == 3) {
if (srv_unix_file_flush_method == SRV_UNIX_NOSYNC) {
/* Write the log but do not flush it to disk */
@@ -1809,7 +1810,11 @@ trx_commit_complete_for_mysql(
/* Do nothing */
} else if (flush_log_at_trx_commit == 0) {
/* Do nothing */
- } else if (flush_log_at_trx_commit == 1) {
+ } else if (flush_log_at_trx_commit == 1 && trx->active_commit_ordered) {
+ /* Do nothing - we already flushed the prepare and binlog write
+ to disk, so transaction is durable (will be recovered from
+ binlog if necessary) */
+ } else if (flush_log_at_trx_commit == 1 || flush_log_at_trx_commit == 3) {
if (srv_unix_file_flush_method == SRV_UNIX_NOSYNC) {
/* Write the log but do not flush it to disk */
@@ -2114,7 +2119,7 @@ trx_prepare_off_kernel(
if (flush_log_at_trx_commit == 0) {
/* Do nothing */
- } else if (flush_log_at_trx_commit == 1) {
+ } else if (flush_log_at_trx_commit == 1 || flush_log_at_trx_commit == 3) {
if (srv_unix_file_flush_method == SRV_UNIX_NOSYNC) {
/* Write the log but do not flush it to disk */
diff --git a/strings/my_vsnprintf.c b/strings/my_vsnprintf.c
index 55d907acf1c..362dafb76ce 100644
--- a/strings/my_vsnprintf.c
+++ b/strings/my_vsnprintf.c
@@ -17,10 +17,14 @@
#include "strings_def.h"
#include <m_ctype.h>
#include <stdarg.h>
+#include <my_sys.h>
+#include <my_base.h>
+#include <my_handler_errors.h>
#define MAX_ARGS 32 /* max positional args count*/
#define MAX_PRINT_INFO 32 /* max print position count */
+#define MAX_WIDTH 65535
#define LENGTH_ARG 1
#define WIDTH_ARG 2
@@ -65,6 +69,7 @@ struct print_info
static const char *get_length(const char *fmt, size_t *length, uint *pre_zero)
{
+
for (; my_isdigit(&my_charset_latin1, *fmt); fmt++)
{
*length= *length * 10 + (uint)(*fmt - '0');
@@ -75,23 +80,28 @@ static const char *get_length(const char *fmt, size_t *length, uint *pre_zero)
}
-/**
- Calculates print width or index of positional argument
+/*
+ Get argument for '*' parameter
@param fmt processed string
- @param width print width or index of positional argument
+ @param args_arr Arguments to printf
+ @param arg_count Number of arguments to printf
+ @param length returns length of argument
+ @param flags returns flags with PREZERO_ARG set if necessary
- @retval
- string position right after width digits
+ @return new fmt
*/
-static const char *get_width(const char *fmt, size_t *width)
+static const char *get_length_arg(const char *fmt, ARGS_INFO *args_arr,
+ uint *arg_count, size_t *length, uint *flags)
{
- for (; my_isdigit(&my_charset_latin1, *fmt); fmt++)
- {
- *width= *width * 10 + (uint)(*fmt - '0');
- }
- return fmt;
+ fmt= get_length(fmt+1, length, flags);
+ *arg_count= max(*arg_count, (uint) *length);
+ (*length)--;
+ DBUG_ASSERT(*fmt == '$' && *length < MAX_ARGS);
+ args_arr[*length].arg_type= 'd';
+ args_arr[*length].have_longlong= 0;
+ return fmt+1;
}
/**
@@ -123,6 +133,8 @@ static const char *check_longlong(const char *fmt, uint *have_longlong)
fmt++;
*have_longlong= (sizeof(size_t) == sizeof(longlong));
}
+ else if (*fmt == 'p')
+ *have_longlong= (sizeof(void *) == sizeof(longlong));
return fmt;
}
@@ -227,7 +239,7 @@ static char *process_bin_arg(char *to, char *end, size_t width, char *par)
static char *process_dbl_arg(char *to, char *end, size_t width,
double par, char arg_type)
{
- if (width == SIZE_T_MAX)
+ if (width == MAX_WIDTH)
width= FLT_DIG; /* width not set, use default */
else if (width >= NOT_FIXED_DEC)
width= NOT_FIXED_DEC - 1; /* max.precision for my_fcvt() */
@@ -338,42 +350,31 @@ start:
/* Get print length */
if (*fmt == '*')
{
- fmt++;
- fmt= get_length(fmt, &print_arr[idx].length, &print_arr[idx].flags);
- print_arr[idx].length--;
- DBUG_ASSERT(*fmt == '$' && print_arr[idx].length < MAX_ARGS);
- args_arr[print_arr[idx].length].arg_type= 'd';
+ fmt= get_length_arg(fmt, args_arr, &arg_count, &print_arr[idx].length,
+ &print_arr[idx].flags);
print_arr[idx].flags|= LENGTH_ARG;
- arg_count= max(arg_count, print_arr[idx].length + 1);
- fmt++;
}
else
fmt= get_length(fmt, &print_arr[idx].length, &print_arr[idx].flags);
if (*fmt == '.')
{
+ uint unused_flags= 0;
fmt++;
/* Get print width */
if (*fmt == '*')
{
- fmt++;
- fmt= get_width(fmt, &print_arr[idx].width);
- print_arr[idx].width--;
- DBUG_ASSERT(*fmt == '$' && print_arr[idx].width < MAX_ARGS);
- args_arr[print_arr[idx].width].arg_type= 'd';
+ fmt= get_length_arg(fmt, args_arr, &arg_count, &print_arr[idx].width,
+ &unused_flags);
print_arr[idx].flags|= WIDTH_ARG;
- arg_count= max(arg_count, print_arr[idx].width + 1);
- fmt++;
}
else
- fmt= get_width(fmt, &print_arr[idx].width);
+ fmt= get_length(fmt, &print_arr[idx].width, &unused_flags);
}
else
- print_arr[idx].width= SIZE_T_MAX;
+ print_arr[idx].width= MAX_WIDTH;
fmt= check_longlong(fmt, &args_arr[arg_index].have_longlong);
- if (*fmt == 'p')
- args_arr[arg_index].have_longlong= (sizeof(void *) == sizeof(longlong));
args_arr[arg_index].arg_type= print_arr[idx].arg_type= *fmt;
print_arr[idx].arg_idx= arg_index;
@@ -412,6 +413,7 @@ start:
else
args_arr[i].longlong_arg= va_arg(ap, uint);
break;
+ case 'M':
case 'c':
args_arr[i].longlong_arg= va_arg(ap, int);
break;
@@ -472,17 +474,34 @@ start:
? (size_t)args_arr[print_arr[i].length].longlong_arg
: print_arr[i].length;
- if (args_arr[print_arr[i].arg_idx].have_longlong)
- larg = args_arr[print_arr[i].arg_idx].longlong_arg;
- else if (print_arr[i].arg_type == 'd' || print_arr[i].arg_type == 'i' )
- larg = (int) args_arr[print_arr[i].arg_idx].longlong_arg;
- else
- larg= (uint) args_arr[print_arr[i].arg_idx].longlong_arg;
-
+ larg = args_arr[print_arr[i].arg_idx].longlong_arg;
to= process_int_arg(to, end, length, larg, print_arr[i].arg_type,
print_arr[i].flags);
break;
}
+ case 'M':
+ {
+ longlong larg;
+ char *org_to= to;
+ char errmsg_buff[MYSYS_STRERROR_SIZE];
+
+ length= (print_arr[i].flags & WIDTH_ARG)
+ ? (size_t)args_arr[print_arr[i].width].longlong_arg
+ : print_arr[i].width;
+
+ larg = args_arr[print_arr[i].arg_idx].longlong_arg;
+ to= process_int_arg(to, end, 0, larg, 'd', print_arr[i].flags);
+ width-= (to - org_to);
+ if (width <= 4)
+ break;
+ *to++= ' ';
+ *to++= '"';
+ my_strerror(errmsg_buff, sizeof(errmsg_buff), (int) larg);
+ to= process_str_arg(cs, to, end, width-3, errmsg_buff,
+ print_arr[i].flags);
+ *to++= '"';
+ break;
+ }
default:
break;
}
@@ -490,6 +509,7 @@ start:
if (to == end)
break;
+ /* Copy data after the % format expression until next % */
length= min(end - to , print_arr[i].end - print_arr[i].begin);
if (to + length < end)
length++;
@@ -501,13 +521,14 @@ start:
}
else
{
+ uint unused_flags= 0;
/* Process next positional argument*/
DBUG_ASSERT(*fmt == '%');
print_arr[idx].end= fmt - 1;
idx++;
fmt++;
arg_index= 0;
- fmt= get_width(fmt, &arg_index);
+ fmt= get_length(fmt, &arg_index, &unused_flags);
DBUG_ASSERT(*fmt == '$');
fmt++;
arg_count= max(arg_count, arg_index);
@@ -585,6 +606,7 @@ size_t my_vsnprintf_ex(CHARSET_INFO *cs, char *to, size_t n,
if (*fmt == '.')
{
+ uint unused_flags= 0;
fmt++;
if (*fmt == '*')
{
@@ -592,10 +614,10 @@ size_t my_vsnprintf_ex(CHARSET_INFO *cs, char *to, size_t n,
width= va_arg(ap, int);
}
else
- fmt= get_width(fmt, &width);
+ fmt= get_length(fmt, &width, &unused_flags);
}
else
- width= SIZE_T_MAX;
+ width= MAX_WIDTH;
fmt= check_longlong(fmt, &have_longlong);
@@ -622,8 +644,6 @@ size_t my_vsnprintf_ex(CHARSET_INFO *cs, char *to, size_t n,
{
/* Integer parameter */
longlong larg;
- if (*fmt == 'p')
- have_longlong= (sizeof(void *) == sizeof(longlong));
if (have_longlong)
larg = va_arg(ap,longlong);
@@ -644,9 +664,27 @@ size_t my_vsnprintf_ex(CHARSET_INFO *cs, char *to, size_t n,
*to++= (char) larg;
continue;
}
+ else if (*fmt == 'M')
+ {
+ const char *org_to= to;
+ int larg= va_arg(ap, int);
+ to= process_int_arg(to, end, 0, larg, 'd', print_type);
+ width-= (to - org_to);
+ if ((end - to) >= 3 && (int) width >= 3)
+ {
+ char errmsg_buff[MYSYS_STRERROR_SIZE];
+ *to++= ' ';
+ *to++= '"';
+ my_strerror(errmsg_buff, sizeof(errmsg_buff), larg);
+ to= process_str_arg(cs, to, end, width-3, errmsg_buff, print_type);
+ if (end > to)
+ *to++= '"';
+ }
+ continue;
+ }
/* We come here on '%%', unknown code or too long parameter */
- if (to == end)
+ if (to >= end)
break;
*to++='%'; /* % used as % or unknown code */
}
@@ -742,3 +780,67 @@ int my_fprintf(FILE *stream, const char* format, ...)
va_end(args);
return result;
}
+
+
+/*
+ Return system error text for given error number
+
+ @param buf Buffer (of size MYSYS_STRERROR_SIZE)
+ @param len Length of buffer
+ @param nr Error number
+*/
+
+void my_strerror(char *buf, size_t len, int nr)
+{
+ char *msg= NULL;
+
+ buf[0]= '\0'; /* failsafe */
+
+ if (nr <= 0)
+ {
+ strmake(buf, (nr == 0 ?
+ "Internal error/check (Not system error)" :
+ "Internal error < 0 (Not system error)"),
+ len-1);
+ return;
+ }
+
+ /*
+    These handler error messages are also used by perror, so both
+    programs print the same text for a given error number.
+ */
+ if ((nr >= HA_ERR_FIRST) && (nr <= HA_ERR_LAST))
+ {
+ msg= (char *) handler_error_messages[nr - HA_ERR_FIRST];
+ strmake(buf, msg, len - 1);
+ }
+ else
+ {
+ /*
+ On Windows, do things the Windows way. On a system that supports both
+ the GNU and the XSI variant, use whichever was configured (GNU); if
+ this choice is not advertised, use the default (POSIX/XSI). Testing
+ for __GNUC__ is not sufficient to determine whether this choice exists.
+ */
+#if defined(__WIN__)
+ strerror_s(buf, len, nr);
+#elif ((defined _POSIX_C_SOURCE && (_POSIX_C_SOURCE >= 200112L)) || \
+ (defined _XOPEN_SOURCE && (_XOPEN_SOURCE >= 600))) && \
+ ! defined _GNU_SOURCE
+ strerror_r(nr, buf, len); /* I can build with or without GNU */
+#elif defined _GNU_SOURCE
+ char *r= strerror_r(nr, buf, len);
+  if (r != buf)                   /* GNU strerror_r() may not fill buf */
+    strmake(buf, r, len - 1);     /* copy the static string it returned */
+#else
+ strerror_r(nr, buf, len);
+#endif
+ }
+
+ /*
+ strerror() return values are implementation-dependent, so let's
+ be pragmatic.
+ */
+ if (!buf[0])
+ strmake(buf, "unknown error", len - 1);
+}
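
The net effect of the my_vsnprintf.c changes is a new %M specifier: it consumes an int, prints it as a decimal error number, then appends the quoted my_strerror() text when there is room. A hypothetical caller, assuming my_snprintf() as the bounded front end to my_vsnprintf() and the usual mysys include set:

#include <my_global.h>
#include <m_string.h>   /* my_snprintf() */
#include <errno.h>
#include <stdio.h>

static void report_open_failure(const char *path)
{
  char msg[128];
  /* On Linux this renders as, e.g.:
     Cannot open '/etc/shadow', error 13 "Permission denied"  */
  my_snprintf(msg, sizeof(msg), "Cannot open '%s', error %M", path, errno);
  fputs(msg, stderr);
}

The exact message text is platform-dependent, which is why the unit test further below accepts several alternative outputs.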
diff --git a/support-files/compiler_warnings.supp b/support-files/compiler_warnings.supp
index 809369bc436..34f368c4373 100644
--- a/support-files/compiler_warnings.supp
+++ b/support-files/compiler_warnings.supp
@@ -28,7 +28,6 @@ pars0grm.c: 'yyerrorlab' : unreferenced label
_flex_tmp.c: .*not enough actual parameters for macro 'yywrap'.*
lexyy.c : not enough actual parameters for macro 'yywrap'
pars0lex.l: .*conversion from 'ulint' to 'int', possible loss of data.*
-btr/btr0cur\.c: .*value computed is not used.*: 3175-3375
include/buf0buf\.ic: unused parameter .*mtr.*
fil/fil0fil\.c: pointer targets in passing argument.*differ in signedness
fil/fil0fil\.c: comparison between signed and unsigned : 3100-3199
@@ -44,7 +43,9 @@ ut/ut0ut\.c: ignoring return value of
srv/srv0srv\.c: value computed is not used
buf/buf0buf\.c: .*block_mutex.* might be used uninitialized
btr/btr0cur\.c: null argument where non-null required: 1800-3000
-btr/btr0btr\.c: null argument where non-null required: 2500-3000
+btr/btr0btr\.c: null argument where non-null required
+btr/btr0cur\.c: .*value computed is not used.*: 3175-3375
+btr/btr0sea\.c: passing argument 2 .* discards qualifiers from pointer target type
ibuf/ibuf0ibuf.c: null argument where non-null required: 700-1000
fsp0fsp\.c: result of 32-bit shift implicitly converted to 64 bits
@@ -160,6 +161,13 @@ mySTL/algorithm\.hpp: is used uninitialized in this function
include/pwdbased\.hpp: comparison of unsigned expression
#
+# OpenSSL
+#
+# The following is due to a prototype difference between yassl and openssl.
+# Safe, as the argument is a function within the library.
+vio/viosslfactories\.c: discards ~const~ qualifier from pointer target type
+
+#
# Groff warnings on OpenSUSE.
#
.*/dbug/.*(groff|<standard input>) : .*
@@ -193,6 +201,9 @@ ma_packrec\.c : .*result of 32-bit shift implicitly converted to 64 bits.* : 550
#
.* : .*no matching operator delete found; memory will not be freed if initialization throws an exception.*
ctype-simple\.c : .*unary minus operator applied to unsigned type, result still unsigned.*
+sql/sys_vars\.cc : invalid access to non-static data member
+string3\.h : memset used with constant zero length parameter
+
# Wrong warning due to GCC bug: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=29478
regexec\.c : passing argument 3 of.*matcher.* discards qualifiers from pointer target type
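
For reference, the suppression entries edited above appear to follow the file's one-line format: a file-name regex, a colon, a warning-message regex, and an optional line range restricting the match. A hypothetical entry in that style:

# <file regex> : <warning-message regex> [ : <first-line>-<last-line> ]
example/myfile\.c : .*comparison between signed and unsigned.* : 100-200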
diff --git a/unittest/mysys/my_vsnprintf-t.c b/unittest/mysys/my_vsnprintf-t.c
index 06f6878826a..5809d6144cc 100644
--- a/unittest/mysys/my_vsnprintf-t.c
+++ b/unittest/mysys/my_vsnprintf-t.c
@@ -19,7 +19,7 @@
char buf[1024]; /* let's hope that's enough */
-void test1(const char *res, const char *fmt, ...)
+static void test1(const char *res, const char *fmt, ...)
{
va_list args;
size_t len;
@@ -29,9 +29,29 @@ void test1(const char *res, const char *fmt, ...)
ok(strlen(res) == len && strcmp(buf, res) == 0, "\"%s\"", buf);
}
+static void test_many(const char **res, const char *fmt, ...)
+{
+ va_list args;
+ size_t len;
+ va_start(args,fmt);
+ len= my_vsnprintf(buf, sizeof(buf)-1, fmt, args);
+ va_end(args);
+
+ for (; *res ; res++)
+ {
+ if (strlen(*res) == len && strcmp(buf, *res) == 0)
+ {
+ ok(1, "\"%s\"", buf);
+ return;
+ }
+ }
+ ok(0, "\"%s\"", buf);
+}
+
+
int main(void)
{
- plan(58);
+ plan(59);
test1("Constant string",
"Constant string");
@@ -177,6 +197,17 @@ int main(void)
test1("My `DDDD` test CCCC, `DDD`",
"My %1$`s test %2$s, %1$`-.3s", "DDDD", "CCCC");
+ {
+ /* Test that %M works */
+ const char *results[]=
+ {
+ "Error 1 \"Operation not permitted\"", /* Linux */
+ "Error 1 \"Not owner\"", /* Solaris */
+ NullS
+ };
+ test_many(results, "Error %M", 1);
+ }
+
return exit_status();
}
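
Because strerror() wording varies between platforms, the new test_many() helper takes a NullS-terminated array of acceptable outputs and passes if any of them matches. A hypothetical extra case in the same style (every added ok() requires bumping the plan() count, as the hunk above does for 58 to 59):

  {
    /* errno 2 also differs in wording across systems; list each
       rendering that is considered acceptable */
    const char *results[]=
    {
      "Error 2 \"No such file or directory\"",
      NullS
    };
    test_many(results, "Error %M", 2);
  }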
diff --git a/unittest/sql/CMakeLists.txt b/unittest/sql/CMakeLists.txt
new file mode 100644
index 00000000000..36b14e9a08a
--- /dev/null
+++ b/unittest/sql/CMakeLists.txt
@@ -0,0 +1,3 @@
+
+MY_ADD_TESTS(my_apc LINK_LIBRARIES mysys EXT cc)
+
diff --git a/unittest/sql/my_apc-t.cc b/unittest/sql/my_apc-t.cc
new file mode 100644
index 00000000000..8b599198302
--- /dev/null
+++ b/unittest/sql/my_apc-t.cc
@@ -0,0 +1,227 @@
+/*
+ Copyright (c) 2012, Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+  This file contains standalone tests for the APC system.
+*/
+#include <stdio.h>
+#include <my_global.h>
+#include <my_pthread.h>
+#include <my_sys.h>
+
+#include <tap.h>
+
+/*
+ A fake THD with enter_cond/exit_cond and some other members.
+*/
+class THD
+{
+ mysql_mutex_t* thd_mutex;
+public:
+ bool killed;
+
+ THD() : killed(FALSE) {}
+ inline const char* enter_cond(mysql_cond_t *cond, mysql_mutex_t* mutex,
+ const char* msg)
+ {
+ mysql_mutex_assert_owner(mutex);
+ thd_mutex= mutex;
+ return NULL;
+ }
+ inline void exit_cond(const char* old_msg)
+ {
+ mysql_mutex_unlock(thd_mutex);
+ }
+};
+
+#include "../sql/my_apc.h"
+
+#define MY_APC_STANDALONE 1
+#include "../sql/my_apc.cc"
+
+volatile bool started= FALSE;
+volatile bool service_should_exit= FALSE;
+volatile bool requestors_should_exit=FALSE;
+
+/* Counters for APC calls */
+int apcs_served= 0;
+int apcs_missed=0;
+int apcs_timed_out=0;
+mysql_mutex_t apc_counters_mutex;
+
+inline void increment_counter(int *var)
+{
+ mysql_mutex_lock(&apc_counters_mutex);
+ *var= *var+1;
+ mysql_mutex_unlock(&apc_counters_mutex);
+}
+
+volatile bool have_errors= false;
+
+Apc_target apc_target;
+mysql_mutex_t target_mutex;
+
+int int_rand(int size)
+{
+ return (int) (0.5 + ((double)rand() / RAND_MAX) * size);
+}
+
+/*
+ APC target thread (the one that will serve the APC requests). We will have
+ one target.
+*/
+void *test_apc_service_thread(void *ptr)
+{
+ my_thread_init();
+ mysql_mutex_init(0, &target_mutex, MY_MUTEX_INIT_FAST);
+ apc_target.init(&target_mutex);
+ apc_target.enable();
+ started= TRUE;
+ diag("test_apc_service_thread started");
+ while (!service_should_exit)
+ {
+ //apc_target.disable();
+ my_sleep(10000);
+ //apc_target.enable();
+ for (int i = 0; i < 10 && !service_should_exit; i++)
+ {
+ apc_target.process_apc_requests();
+ my_sleep(int_rand(30));
+ }
+ }
+ apc_target.disable();
+ apc_target.destroy();
+ mysql_mutex_destroy(&target_mutex);
+ my_thread_end();
+ pthread_exit(0);
+ return NULL;
+}
+
+
+/*
+ One APC request (to write 'value' into *where_to)
+*/
+class Apc_order : public Apc_target::Apc_call
+{
+public:
+ int value; // The value
+ int *where_to; // Where to write it
+ Apc_order(int a, int *b) : value(a), where_to(b) {}
+
+ void call_in_target_thread()
+ {
+ my_sleep(int_rand(1000));
+ *where_to = value;
+ increment_counter(&apcs_served);
+ }
+};
+
+
+/*
+ APC requestor thread. It makes APC requests, and checks if they were actually
+ executed.
+*/
+void *test_apc_requestor_thread(void *ptr)
+{
+ my_thread_init();
+ diag("test_apc_requestor_thread started");
+ THD my_thd;
+
+ while (!requestors_should_exit)
+ {
+ int dst_value= 0;
+ int src_value= int_rand(4*1000*100);
+ /* Create an APC to do "dst_value= src_value" assignment */
+ Apc_order apc_order(src_value, &dst_value);
+ bool timed_out;
+
+ mysql_mutex_lock(&target_mutex);
+ bool res= apc_target.make_apc_call(&my_thd, &apc_order, 60, &timed_out);
+ if (res)
+ {
+ if (timed_out)
+ increment_counter(&apcs_timed_out);
+ else
+ increment_counter(&apcs_missed);
+
+ if (dst_value != 0)
+ {
+        diag("APC was done even though the return value says it wasn't!");
+ have_errors= true;
+ }
+ }
+ else
+ {
+ if (dst_value != src_value)
+ {
+        diag("APC was not done even though the return value says it was!");
+ have_errors= true;
+ }
+ }
+ //my_sleep(300);
+ }
+ diag("test_apc_requestor_thread exiting");
+ my_thread_end();
+ return NULL;
+}
+
+/* Number of APC requestor threads */
+const int N_THREADS=23;
+
+
+int main(int argc, char **argv)
+{
+ pthread_t service_thr;
+ pthread_t request_thr[N_THREADS];
+ int i;
+
+ my_thread_global_init();
+
+ mysql_mutex_init(0, &apc_counters_mutex, MY_MUTEX_INIT_FAST);
+
+ plan(1);
+ diag("Testing APC delivery and execution");
+
+ pthread_create(&service_thr, NULL, test_apc_service_thread, (void*)NULL);
+ while (!started)
+ my_sleep(1000);
+ for (i = 0; i < N_THREADS; i++)
+ pthread_create(&request_thr[i], NULL, test_apc_requestor_thread, (void*)NULL);
+
+ for (i = 0; i < 15; i++)
+ {
+ my_sleep(500*1000);
+ diag("%d APCs served %d missed", apcs_served, apcs_missed);
+ }
+ diag("Shutting down requestors");
+ requestors_should_exit= TRUE;
+ for (i = 0; i < N_THREADS; i++)
+ pthread_join(request_thr[i], NULL);
+
+ diag("Shutting down service");
+ service_should_exit= TRUE;
+ pthread_join(service_thr, NULL);
+
+ mysql_mutex_destroy(&apc_counters_mutex);
+
+ diag("Done");
+ my_thread_end();
+ my_thread_global_end();
+
+ ok1(!have_errors);
+ return exit_status();
+}
+
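The protocol this test exercises: a requestor builds an Apc_call object, locks the target's mutex, and calls make_apc_call(), which queues the request and blocks (releasing the mutex through enter_cond()/exit_cond()) until the target thread runs it via process_apc_requests() or the timeout expires. A distilled happy-path sketch under those assumptions, reusing the declarations already in this test file:

/* Order that sets a flag in the target thread's context. */
class Set_flag_order : public Apc_target::Apc_call
{
public:
  bool *flag;
  Set_flag_order(bool *f) : flag(f) {}
  void call_in_target_thread() { *flag= true; }  /* runs in the target */
};

/* Returns true only if the target actually executed the call. */
static bool request_flag_set(THD *thd, Apc_target *target,
                             mysql_mutex_t *mutex)
{
  bool flag= false;
  bool timed_out= false;
  Set_flag_order order(&flag);

  mysql_mutex_lock(mutex);      /* make_apc_call() releases it on return */
  bool failed= target->make_apc_call(thd, &order, 60 /* seconds */,
                                     &timed_out);
  return !failed && flag;
}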